git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/ttm: Remap all page faults to per process dummy page.
author: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Wed, 12 May 2021 14:26:33 +0000 (10:26 -0400)
committer: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Thu, 20 May 2021 03:45:49 +0000 (23:45 -0400)
On device removal reroute all CPU mappings to dummy page.

v3:
Remove loop to find DRM file and instead access it
by vma->vm_file->private_data. Move dummy page installation
into a separate function.

v4:
Map the entire BOs VA space into on demand allocated dummy page
on the first fault for that BO.

v5: Remove duplicate return.

v6: Polish ttm_bo_vm_dummy_page, remove superfluous code.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210512142648.666476-2-andrey.grodzovsky@amd.com
drivers/gpu/drm/ttm/ttm_bo_vm.c
include/drm/ttm/ttm_bo_api.h

index b31b18058965e8c98e896e9f8843cff4c403b521..7ff9fd5513573421a102d6eaf79778e6ed925317 100644 (file)
@@ -34,6 +34,8 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
 #include <linux/mm.h>
 #include <linux/pfn_t.h>
 #include <linux/rbtree.h>
@@ -380,19 +382,69 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
 
+/*
+ * drmm release action: frees the dummy page allocated in
+ * ttm_bo_vm_dummy_page() when the drm_device itself is released.
+ * @dev is unused; @res is the struct page registered with
+ * drmm_add_action_or_reset().
+ */
+static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
+{
+       struct page *dummy_page = (struct page *)res;
+
+       __free_page(dummy_page);
+}
+
+/**
+ * ttm_bo_vm_dummy_page - remap a whole VMA to a single zeroed dummy page
+ * @vmf: fault descriptor for the faulting VMA
+ * @prot: page protection to apply to the inserted PTEs
+ *
+ * Called on the fault path after the underlying device has been
+ * unplugged (see the drm_dev_enter() check in ttm_bo_vm_fault()).
+ * Allocates one zero-filled page and maps every page-sized slot of the
+ * VMA to it, so stale CPU mappings read zeroes instead of touching a
+ * dead device.
+ *
+ * Return: VM_FAULT_NOPAGE on success, VM_FAULT_OOM on allocation
+ * failure, or the error from the last vmf_insert_* call.
+ */
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct ttm_buffer_object *bo = vma->vm_private_data;
+       struct drm_device *ddev = bo->base.dev;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
+       unsigned long address;
+       unsigned long pfn;
+       struct page *page;
+
+       /* Allocate new dummy page to map all the VA range in this VMA to it*/
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page)
+               return VM_FAULT_OOM;
+
+       /*
+        * Set the page to be freed using drmm release action; on failure
+        * the _or_reset variant has already freed the page for us.
+        *
+        * NOTE(review): each fault that reaches this path allocates and
+        * registers its own page; pages accumulate until drm_device
+        * release — confirm this is acceptable for the unplug lifetime.
+        */
+       if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
+               return VM_FAULT_OOM;
+
+       pfn = page_to_pfn(page);
+
+       /*
+        * Prefault the entire VMA range right away to avoid further faults.
+        * NOTE(review): ret is overwritten each iteration, so only the
+        * result of the final insertion is reported; earlier failures are
+        * silently dropped — verify this is intended.
+        */
+       for (address = vma->vm_start; address < vma->vm_end; address += PAGE_SIZE) {
+
+               if (vma->vm_flags & VM_MIXEDMAP)
+                       ret = vmf_insert_mixed_prot(vma, address,
+                                                   __pfn_to_pfn_t(pfn, PFN_DEV),
+                                                   prot);
+               else
+                       ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_dummy_page);
+
 vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        pgprot_t prot;
        struct ttm_buffer_object *bo = vma->vm_private_data;
+       struct drm_device *ddev = bo->base.dev;
        vm_fault_t ret;
+       int idx;
 
        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;
 
        prot = vma->vm_page_prot;
-       ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+       if (drm_dev_enter(ddev, &idx)) {
+               ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+               drm_dev_exit(idx);
+       } else {
+               ret = ttm_bo_vm_dummy_page(vmf, prot);
+       }
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;
 
index 639521880c29bd45473e60d2e28712829183e5c0..254ede97f8e359a7c882c431311fa3496407ad67 100644 (file)
@@ -620,4 +620,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                     void *buf, int len, int write);
 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
 
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+
 #endif