drm/selftests: add drm buddy pathological testcase
author     Arunpravin <Arunpravin.PaneerSelvam@amd.com>
           Tue, 22 Feb 2022 17:48:45 +0000 (23:18 +0530)
committer  Christian König <christian.koenig@amd.com>
           Wed, 23 Feb 2022 09:46:32 +0000 (10:46 +0100)
Create a pot-sized (power-of-two sized) mm, then allocate one of each
possible order within it. This should leave the mm with exactly one
page left. Free the largest block, then whittle down again.
Eventually we will have a fully 50% fragmented mm.
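
For reference, a minimal user-space sketch (not part of the patch) of the accounting
behind that pattern, assuming the same max_order = 3 as the test and an illustrative
4K page size. Each pass ends with the mm completely full, which is why the test then
expects the top-order allocation to fail:

#include <stdio.h>

int main(void)
{
	const int max_order = 3;              /* same value the selftest uses */
	const unsigned long page_size = 4096; /* illustrative 4K page */
	const unsigned long mm_size = page_size << max_order; /* 8 pages */
	unsigned long allocated = 0, holes = 0;
	int top, order;

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest block of the previous pass */
		if (top != max_order)
			allocated -= page_size << top;

		/* Allocate one block of every order strictly below "top" */
		for (order = top - 1; order >= 0; order--)
			allocated += page_size << order;

		/* The one remaining page is allocated too, but kept as a hole */
		allocated += page_size;
		holes += page_size;

		printf("top=%d: allocated=%lu of %lu, holes=%lu\n",
		       top, allocated, mm_size, holes);
	}
	return 0;
}

Note that "allocated" here also counts the hole pages, since they stay allocated until
the test frees the holes list at the end and then checks that nothing larger than a
single page can still be allocated.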

v2(Matthew Auld):
  - removed the unnecessary "test succeeded" print
  - replaced list_del()/list_add_tail() with list_move_tail()

Signed-off-by: Arunpravin <Arunpravin.PaneerSelvam@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220222174845.2175-7-Arunpravin.PaneerSelvam@amd.com
Signed-off-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/selftests/drm_buddy_selftests.h
drivers/gpu/drm/selftests/test-drm_buddy.c

diff --git a/drivers/gpu/drm/selftests/drm_buddy_selftests.h b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
index 411d072cbfc508ba2a592bb98f6953d312ef5257..455b756c4ae581733272a28a3a21b525ba3456a9 100644
--- a/drivers/gpu/drm/selftests/drm_buddy_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
@@ -12,3 +12,4 @@ selftest(buddy_alloc_range, igt_buddy_alloc_range)
 selftest(buddy_alloc_optimistic, igt_buddy_alloc_optimistic)
 selftest(buddy_alloc_pessimistic, igt_buddy_alloc_pessimistic)
 selftest(buddy_alloc_smoke, igt_buddy_alloc_smoke)
+selftest(buddy_alloc_pathological, igt_buddy_alloc_pathological)
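
For context (not part of the patch): this one-line addition is all that is needed to
register the new test, because the selftest runner includes drm_buddy_selftests.h with
the selftest() macro redefined to build a table of tests, roughly along these lines.
This is a simplified sketch with illustrative names; the real expansion lives in
drivers/gpu/drm/selftests/drm_selftest.c, which test-drm_buddy.c pulls in.

/* Illustrative sketch: each selftest() line becomes a { name, function } entry,
 * and the runner invokes the entries in order when the test module loads.
 */
#define selftest(name, func) { .name = #name, .func = func },
static const struct {
	const char *name;
	int (*func)(void *arg);
} buddy_selftests[] = {
#include "drm_buddy_selftests.h"
};
#undef selftest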
diff --git a/drivers/gpu/drm/selftests/test-drm_buddy.c b/drivers/gpu/drm/selftests/test-drm_buddy.c
index e1cc2353a47649d6b1454bf7d69b4bf9dee70720..fa997f89522b6675b5df5b763c01a76fe9d9d2cb 100644
--- a/drivers/gpu/drm/selftests/test-drm_buddy.c
+++ b/drivers/gpu/drm/selftests/test-drm_buddy.c
@@ -338,6 +338,136 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
        *size = (u64)s << 12;
 }
 
+static int igt_buddy_alloc_pathological(void *arg)
+{
+       u64 mm_size, size, min_page_size, start = 0;
+       struct drm_buddy_block *block;
+       const int max_order = 3;
+       unsigned long flags = 0;
+       int order, top, err;
+       struct drm_buddy mm;
+       LIST_HEAD(blocks);
+       LIST_HEAD(holes);
+       LIST_HEAD(tmp);
+
+       /*
+        * Create a pot-sized mm, then allocate one of each possible
+        * order within. This should leave the mm with exactly one
+        * page left. Free the largest block, then whittle down again.
+        * Eventually we will have a fully 50% fragmented mm.
+        */
+
+       mm_size = PAGE_SIZE << max_order;
+       err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+       BUG_ON(mm.max_order != max_order);
+
+       for (top = max_order; top; top--) {
+               /* Make room by freeing the largest allocated block */
+               block = list_first_entry_or_null(&blocks, typeof(*block), link);
+               if (block) {
+                       list_del(&block->link);
+                       drm_buddy_free_block(&mm, block);
+               }
+
+               for (order = top; order--; ) {
+                       size = min_page_size = get_size(order, PAGE_SIZE);
+                       err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
+                                                    min_page_size, &tmp, flags);
+                       if (err) {
+                               pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+                                       order, top);
+                               goto err;
+                       }
+
+                       block = list_first_entry_or_null(&tmp,
+                                                        struct drm_buddy_block,
+                                                        link);
+                       if (!block) {
+                               pr_err("alloc_blocks has no blocks\n");
+                               err = -EINVAL;
+                               goto err;
+                       }
+
+                       list_move_tail(&block->link, &blocks);
+               }
+
+               /* There should be one final page for this sub-allocation */
+               size = min_page_size = get_size(0, PAGE_SIZE);
+               err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+               if (err) {
+                       pr_info("buddy_alloc hit -ENOMEM for hole\n");
+                       goto err;
+               }
+
+               block = list_first_entry_or_null(&tmp,
+                                                struct drm_buddy_block,
+                                                link);
+               if (!block) {
+                       pr_err("alloc_blocks has no blocks\n");
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               list_move_tail(&block->link, &holes);
+
+               size = min_page_size = get_size(top, PAGE_SIZE);
+               err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+               if (!err) {
+                       pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+                               top, max_order);
+                       block = list_first_entry_or_null(&tmp,
+                                                        struct drm_buddy_block,
+                                                        link);
+                       if (!block) {
+                               pr_err("alloc_blocks has no blocks\n");
+                               err = -EINVAL;
+                               goto err;
+                       }
+
+                       list_move_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       drm_buddy_free_list(&mm, &holes);
+
+       /* Nothing larger than blocks of chunk_size now available */
+       for (order = 1; order <= max_order; order++) {
+               size = min_page_size = get_size(order, PAGE_SIZE);
+               err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+               if (!err) {
+                       pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+                               order);
+                       block = list_first_entry_or_null(&tmp,
+                                                        struct drm_buddy_block,
+                                                        link);
+                       if (!block) {
+                               pr_err("alloc_blocks has no blocks\n");
+                               err = -EINVAL;
+                               goto err;
+                       }
+
+                       list_move_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (err)
+               err = 0;
+
+err:
+       list_splice_tail(&holes, &blocks);
+       drm_buddy_free_list(&mm, &blocks);
+       drm_buddy_fini(&mm);
+       return err;
+}
+
 static int igt_buddy_alloc_smoke(void *arg)
 {
        u64 mm_size, min_page_size, chunk_size, start = 0;