drm/nouveau/mmu: serialise mmu invalidations with private mutex
author Ben Skeggs <bskeggs@redhat.com>
Wed, 2 Dec 2020 22:32:31 +0000 (08:32 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Thu, 11 Feb 2021 00:14:14 +0000 (10:14 +1000)
nvkm_subdev.mutex is going away.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
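
Every hunk below applies the same pattern: struct nvkm_mmu gains a private mutex, nvkm_mmu_ctor()/nvkm_mmu_dtor() manage its lifetime, and each invalidate/flush path takes vmm->mmu->mutex instead of the shared nvkm_subdev.mutex that is being removed. A minimal sketch of that pattern follows; the example_* names are illustrative only, not the actual nouveau code:

/* Illustrative only: a subsystem serialises its hardware invalidations
 * with a mutex it owns, rather than borrowing the per-subdev mutex.
 * The example_* names are hypothetical; see the real hunks below.
 */
#include <linux/mutex.h>

struct example_mmu {
	struct mutex mutex;		/* serialises invalidations */
};

static void example_mmu_ctor(struct example_mmu *mmu)
{
	mutex_init(&mmu->mutex);	/* as added to nvkm_mmu_ctor() */
}

static void example_mmu_dtor(struct example_mmu *mmu)
{
	mutex_destroy(&mmu->mutex);	/* as added to nvkm_mmu_dtor() */
}

static void example_mmu_invalidate(struct example_mmu *mmu)
{
	mutex_lock(&mmu->mutex);	/* was mutex_lock(&subdev->mutex) */
	/* ... write the invalidate registers and poll for completion ... */
	mutex_unlock(&mmu->mutex);
}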

index 54cdcb017518660b5509604524db932eee391690..f0a5373192474dbfc8a6980b7225f339787ccf00 100644 (file)
@@ -117,6 +117,8 @@ struct nvkm_mmu {
                struct list_head list;
        } ptc, ptp;
 
+       struct mutex mutex; /* serialises mmu invalidations */
+
        struct nvkm_device_oclass user;
 };
 
index 6d5212ae2fd57b741715e7d7c9e4c83fe5bdd13e..a01191d6bef8871304caf0bbe50c41a7ad18183b 100644 (file)
@@ -402,6 +402,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
        nvkm_vmm_unref(&mmu->vmm);
 
        nvkm_mmu_ptc_fini(mmu);
+       mutex_destroy(&mmu->mutex);
        return mmu;
 }
 
@@ -420,6 +421,7 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
        mmu->func = func;
        mmu->dma_bits = func->dma_bits;
        nvkm_mmu_ptc_init(mmu);
+       mutex_init(&mmu->mutex);
        mmu->user.ctor = nvkm_ummu_new;
        mmu->user.base = func->mmu.user;
 }
index 6a2d9eb8e1ea8fbb6885c5064a1e2e988afd8782..5438384d9a67445625fce05b77565704424495d5 100644 (file)
@@ -187,12 +187,11 @@ gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
 void
 gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 {
-       struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-       struct nvkm_device *device = subdev->device;
+       struct nvkm_device *device = vmm->mmu->subdev.device;
        struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
        u64 addr = 0;
 
-       mutex_lock(&subdev->mutex);
+       mutex_lock(&vmm->mmu->mutex);
        /* Looks like maybe a "free flush slots" counter, the
         * faster you write to 0x100cbc to more it decreases.
         */
@@ -222,7 +221,7 @@ gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
                if (nvkm_rd32(device, 0x100c80) & 0x00008000)
                        break;
        );
-       mutex_unlock(&subdev->mutex);
+       mutex_unlock(&vmm->mmu->mutex);
 }
 
 void
index 1d3369683a21f3616e4bf4aec507fe6c806045f0..31984671daf84aadd7b5aa4ffcc68b768e2f3607 100644 (file)
@@ -80,17 +80,16 @@ nv41_vmm_desc_12[] = {
 static void
 nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
 {
-       struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-       struct nvkm_device *device = subdev->device;
+       struct nvkm_device *device = vmm->mmu->subdev.device;
 
-       mutex_lock(&subdev->mutex);
+       mutex_lock(&vmm->mmu->mutex);
        nvkm_wr32(device, 0x100810, 0x00000022);
        nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x100810) & 0x00000020)
                        break;
        );
        nvkm_wr32(device, 0x100810, 0x00000000);
-       mutex_unlock(&subdev->mutex);
+       mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func
index 2d89e27e8e9e5eb60e7164957dc1e36c27b4fc3d..0f904486168fe03a0a4f7fb2bf3be176fea1dde7 100644 (file)
@@ -184,7 +184,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
        struct nvkm_device *device = subdev->device;
        int i, id;
 
-       mutex_lock(&subdev->mutex);
+       mutex_lock(&vmm->mmu->mutex);
        for (i = 0; i < NVKM_SUBDEV_NR; i++) {
                if (!atomic_read(&vmm->engref[i]))
                        continue;
@@ -220,7 +220,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
                        nvkm_error(subdev, "%s mmu invalidate timeout\n",
                                   nvkm_subdev_name[i]);
        }
-       mutex_unlock(&subdev->mutex);
+       mutex_unlock(&vmm->mmu->mutex);
 }
 
 int
index b1294d0076c081b83f18f697754d18d6558db017..6cb5eefa45e9aa321d4c71ae73ba34d93457c951 100644 (file)
 static void
 tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
-       struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-       struct nvkm_device *device = subdev->device;
+       struct nvkm_device *device = vmm->mmu->subdev.device;
        u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
 
        type |= 0x00000001; /* PAGE_ALL */
        if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */
 
-       mutex_lock(&subdev->mutex);
+       mutex_lock(&vmm->mmu->mutex);
 
        nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
        nvkm_wr32(device, 0xb830a4, 0x00000000);
@@ -46,7 +45,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
                        break;
        );
 
-       mutex_unlock(&subdev->mutex);
+       mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func