]> git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/amdgpu: Add job fence to resv conditionally
authorxinhui pan <xinhui.pan@amd.com>
Mon, 16 Mar 2020 03:45:14 +0000 (11:45 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 16 Mar 2020 20:21:32 +0000 (16:21 -0400)
Job fence on page table should be a shared one, so add it to the root
page table bo resv.
The last_delayed field is not needed anymore, so remove it.

Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: xinhui pan <xinhui.pan@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c

index b6c960363d556308689d741a6bcf15a3b98651e3..870c7fb56b8f966b357ce583257e18386a870df6 100644 (file)
@@ -1608,9 +1608,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
                if (!dma_fence_is_signaled(vm->last_direct))
                        amdgpu_bo_fence(root, vm->last_direct, true);
-
-               if (!dma_fence_is_signaled(vm->last_delayed))
-                       amdgpu_bo_fence(root, vm->last_delayed, true);
        }
 
        r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -2588,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
                return false;
 
        /* Don't evict VM page tables while they are updated */
-       if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
-           !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+       if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
                amdgpu_vm_eviction_unlock(bo_base->vm);
                return false;
        }
@@ -2766,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
        if (timeout <= 0)
                return timeout;
 
-       timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
-       if (timeout <= 0)
-               return timeout;
-
-       return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+       return dma_fence_wait_timeout(vm->last_direct, true, timeout);
 }
 
 /**
@@ -2843,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
        vm->last_update = NULL;
        vm->last_direct = dma_fence_get_stub();
-       vm->last_delayed = dma_fence_get_stub();
 
        mutex_init(&vm->eviction_lock);
        vm->evicting = false;
@@ -2898,7 +2889,6 @@ error_free_root:
 
 error_free_delayed:
        dma_fence_put(vm->last_direct);
-       dma_fence_put(vm->last_delayed);
        drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -3101,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        dma_fence_wait(vm->last_direct, false);
        dma_fence_put(vm->last_direct);
-       dma_fence_wait(vm->last_delayed, false);
-       dma_fence_put(vm->last_delayed);
 
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
index d00648ee8d54ae66fc8e8ff3327e3a1a5cafdcd7..06fe30e1492d69e66d15233af1fa168be406c8b2 100644 (file)
@@ -276,7 +276,6 @@ struct amdgpu_vm {
 
        /* Last submission to the scheduler entities */
        struct dma_fence        *last_direct;
-       struct dma_fence        *last_delayed;
 
        unsigned int            pasid;
        /* dedicated to vm */
index 4cc7881f438c984afde0a19ccdb8ce8e8167ed55..cf96c335b258b479c37e10fadad3739a51862ad2 100644 (file)
@@ -104,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
        if (r)
                goto error;
 
-       tmp = dma_fence_get(f);
-       if (p->direct)
+       if (p->direct) {
+               tmp = dma_fence_get(f);
                swap(p->vm->last_direct, tmp);
-       else
-               swap(p->vm->last_delayed, tmp);
-       dma_fence_put(tmp);
+               dma_fence_put(tmp);
+       } else {
+               dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+       }
 
        if (fence && !p->direct)
                swap(*fence, f);