drm/amdgpu: Remap all page faults to per process dummy page.
author    Andrey Grodzovsky <andrey.grodzovsky@amd.com>
          Wed, 12 May 2021 14:26:38 +0000 (10:26 -0400)
committer Andrey Grodzovsky <andrey.grodzovsky@amd.com>
          Thu, 20 May 2021 03:50:27 +0000 (23:50 -0400)
On device removal, reroute all CPU mappings to a dummy page, one
per drm_file instance or imported GEM object.

v4:
Update for modified ttm_bo_vm_dummy_page
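
For context, the pattern applied here is the generic DRM hotplug-protection
idiom: the fault handler only walks the normal TTM path while the device is
still registered (drm_dev_enter() succeeds), and otherwise maps a dummy page.
The sketch below illustrates that idiom only; my_drv_vm_fault() and the
surrounding driver are hypothetical, and the TTM/DRM calls follow the
5.13-era signatures this patch relies on.

/*
 * Illustrative sketch, not part of the patch: a hypothetical driver fault
 * handler using the drm_dev_enter()/drm_dev_exit() gate with a dummy-page
 * fallback for an unplugged device.
 */
#include <linux/mm.h>
#include <linux/dma-resv.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo_api.h>

static vm_fault_t my_drv_vm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	/* Take the BO reservation before touching its backing store. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		/* Device still present: fault in the real backing pages. */
		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT, 1);
		drm_dev_exit(idx);
	} else {
		/* Device removed: map the per-file dummy page instead. */
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}

	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}

The only amdgpu-specific addition on top of this idiom is the
amdgpu_bo_fault_reserve_notify() call in the device-present branch, as the
diff below shows.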

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210512142648.666476-7-andrey.grodzovsky@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

index 8c7ec09eb1a4f93e0d98c948948c6f427f5fa6a1..0d54e70278ca03da0cba845fe7b6ed05b3b2970a 100644 (file)
@@ -48,6 +48,7 @@
 #include <drm/ttm/ttm_placement.h>
 
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
 
 #include "amdgpu.h"
 #include "amdgpu_object.h"
@@ -1905,18 +1906,28 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+       struct drm_device *ddev = bo->base.dev;
        vm_fault_t ret;
+       int idx;
 
        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;
 
-       ret = amdgpu_bo_fault_reserve_notify(bo);
-       if (ret)
-               goto unlock;
+       if (drm_dev_enter(ddev, &idx)) {
+               ret = amdgpu_bo_fault_reserve_notify(bo);
+               if (ret) {
+                       drm_dev_exit(idx);
+                       goto unlock;
+               }
 
-       ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-                                      TTM_BO_VM_NUM_PREFAULT, 1);
+               ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+                                              TTM_BO_VM_NUM_PREFAULT, 1);
+
+               drm_dev_exit(idx);
+       } else {
+               ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+       }
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;