drm/amdgpu: cleanup IB pool handling a bit
author    Christian König <christian.koenig@amd.com>
          Wed, 1 Apr 2020 09:18:21 +0000 (11:18 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Tue, 28 Apr 2020 20:20:30 +0000 (16:20 -0400)
Fix the coding style, move and rename the definitions to
better match what they are supposed to be doing.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
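
For quick reference, the renames and moves this patch performs (a reader's
summary derived from the hunks below, not part of the patch itself):

/*
 *   adev->ring_tmp_bo[]    ->  adev->ib_pools[]
 *   AMDGPU_IB_POOL_NORMAL  ->  AMDGPU_IB_POOL_DELAYED
 *   AMDGPU_IB_POOL_VM      ->  AMDGPU_IB_POOL_IMMEDIATE
 *   AMDGPU_IB_POOL_DIRECT  ->  AMDGPU_IB_POOL_DIRECT  (unchanged)
 *
 * enum amdgpu_ib_pool_type and AMDGPU_IB_POOL_SIZE move from amdgpu.h to
 * amdgpu_ring.h, and AMDGPU_IB_POOL_SIZE now holds the pool size in bytes
 * (1 MiB) rather than a block count.
 */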
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c

index 589d8783fa21e86903664894ec2c5e19eab85d29..99e5f474505d372aaaf333670f47cc519c488dbd 100644
@@ -204,8 +204,6 @@ extern int amdgpu_cik_support;
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT           (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE                    16
 #define AMDGPU_DEBUGFS_MAX_COMPONENTS          32
 #define AMDGPUFB_CONN_LIMIT                    4
 #define AMDGPU_BIOS_NUM_SCRATCH                        16
@@ -402,13 +400,6 @@ struct amdgpu_sa_bo {
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
-enum amdgpu_ib_pool_type {
-       AMDGPU_IB_POOL_NORMAL = 0,
-       AMDGPU_IB_POOL_VM,
-       AMDGPU_IB_POOL_DIRECT,
-
-       AMDGPU_IB_POOL_MAX
-};
 /*
  * IRQS.
  */
@@ -866,7 +857,7 @@ struct amdgpu_device {
        unsigned                        num_rings;
        struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
        bool                            ib_pool_ready;
-       struct amdgpu_sa_manager        ring_tmp_bo[AMDGPU_IB_POOL_MAX];
+       struct amdgpu_sa_manager        ib_pools[AMDGPU_IB_POOL_MAX];
        struct amdgpu_sched             gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
 
        /* interrupts */
index 3eee5c7d83e0372195faaf06e52bbb3210855572..7653f62b1b2d8ea806c0a6b627f7e513a3837eb2 100644
@@ -924,7 +924,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
                ring = to_amdgpu_ring(entity->rq->sched);
                r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
-                                  chunk_ib->ib_bytes : 0, AMDGPU_IB_POOL_NORMAL, ib);
+                                  chunk_ib->ib_bytes : 0,
+                                  AMDGPU_IB_POOL_DELAYED, ib);
                if (r) {
                        DRM_ERROR("Failed to get ib !\n");
                        return r;
index aebbbb573884c315398e7231ceef8f86f9ba3a18..c24366aacf3afaa9f5620fb966fa6d79c1c05500 100644
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-               unsigned size,
-               enum amdgpu_ib_pool_type pool_type,
-               struct amdgpu_ib *ib)
+                 unsigned size, enum amdgpu_ib_pool_type pool_type,
+                 struct amdgpu_ib *ib)
 {
        int r;
 
        if (size) {
-               r = amdgpu_sa_bo_new(&adev->ring_tmp_bo[pool_type],
+               r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
                                      &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -305,30 +304,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  */
 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 {
-       int r, i;
        unsigned size;
+       int r, i;
 
-       if (adev->ib_pool_ready) {
+       if (adev->ib_pool_ready)
                return 0;
-       }
+
        for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
                if (i == AMDGPU_IB_POOL_DIRECT)
                        size = PAGE_SIZE * 2;
                else
-                       size = AMDGPU_IB_POOL_SIZE*64*1024;
-               r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo[i],
-                               size,
-                               AMDGPU_GPU_PAGE_SIZE,
-                               AMDGPU_GEM_DOMAIN_GTT);
-               if (r) {
-                       for (i--; i >= 0; i--)
-                               amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
-                       return r;
-               }
+                       size = AMDGPU_IB_POOL_SIZE;
+
+               r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+                                             size, AMDGPU_GPU_PAGE_SIZE,
+                                             AMDGPU_GEM_DOMAIN_GTT);
+               if (r)
+                       goto error;
        }
        adev->ib_pool_ready = true;
 
        return 0;
+
+error:
+       while (i--)
+               amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+       return r;
 }
 
 /**
@@ -343,11 +344,12 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 {
        int i;
 
-       if (adev->ib_pool_ready) {
-               for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
-                       amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
-               adev->ib_pool_ready = false;
-       }
+       if (!adev->ib_pool_ready)
+               return;
+
+       for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+               amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+       adev->ib_pool_ready = false;
 }
 
 /**
@@ -362,9 +364,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
  */
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
-       unsigned i;
-       int r, ret = 0;
        long tmo_gfx, tmo_mm;
+       int r, ret = 0;
+       unsigned i;
 
        tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
        if (amdgpu_sriov_vf(adev)) {
@@ -442,15 +444,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
 
-       seq_printf(m, "-------------------- NORMAL -------------------- \n");
-       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_NORMAL], m);
-       seq_printf(m, "---------------------- VM ---------------------- \n");
-       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_VM], m);
-       seq_printf(m, "-------------------- DIRECT--------------------- \n");
-       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_DIRECT], m);
+       seq_printf(m, "--------------------- DELAYED --------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+                                    m);
+       seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+                                    m);
+       seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
 
        return 0;
-
 }
 
 static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
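
The init-path rework in amdgpu_ib_pool_init() above replaces the inline
backward loop with a goto-based unwind. A minimal, self-contained sketch of
the idiom (hypothetical names, plain userspace C, not kernel code) shows why
"while (i--)" tears down exactly the entries that were initialized:

#include <stdlib.h>

#define NUM_POOLS 3

static int pools_init(void *pools[NUM_POOLS])
{
	int r, i;

	for (i = 0; i < NUM_POOLS; i++) {
		pools[i] = malloc(64);	/* stands in for amdgpu_sa_bo_manager_init() */
		if (!pools[i]) {
			r = -1;		/* kernel code would return -ENOMEM here */
			goto error;
		}
	}
	return 0;

error:
	/* i indexes the entry that failed, so free only entries 0..i-1 */
	while (i--)
		free(pools[i]);
	return r;
}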
index 7390261095b7f71a192e21005fae04801856cf69..107e800635532c1d95fd833e9c8ca6c4756c51d2 100644
@@ -50,6 +50,8 @@
 
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
+#define AMDGPU_IB_POOL_SIZE    (1024 * 1024)
+
 enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_GFX            = AMDGPU_HW_IP_GFX,
        AMDGPU_RING_TYPE_COMPUTE        = AMDGPU_HW_IP_COMPUTE,
@@ -63,6 +65,17 @@ enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_KIQ
 };
 
+enum amdgpu_ib_pool_type {
+       /* Normal submissions to the top of the pipeline. */
+       AMDGPU_IB_POOL_DELAYED,
+       /* Immediate submissions to the bottom of the pipeline. */
+       AMDGPU_IB_POOL_IMMEDIATE,
+       /* Direct submission to the ring buffer during init and reset. */
+       AMDGPU_IB_POOL_DIRECT,
+
+       AMDGPU_IB_POOL_MAX
+};
+
 struct amdgpu_device;
 struct amdgpu_ring;
 struct amdgpu_ib;
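
The enum's new home in amdgpu_ring.h also documents when each pool is used.
A hypothetical helper (for illustration only; only the enum values above come
from the patch) makes the call-site mapping explicit:

/* Illustrative sketch, assuming the enum defined in the hunk above. */
static inline enum amdgpu_ib_pool_type
amdgpu_ib_pool_choose(bool direct_submit, bool immediate_vm_update)
{
	if (direct_submit)		/* init/reset paths, bypass the scheduler */
		return AMDGPU_IB_POOL_DIRECT;
	if (immediate_vm_update)	/* page-table updates, bottom of pipeline */
		return AMDGPU_IB_POOL_IMMEDIATE;
	return AMDGPU_IB_POOL_DELAYED;	/* normal scheduled submissions */
}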
index 476f1f89aaad1f9e1a0cc4c0e1a0f53b58dd6864..2f4d5ca9894fc361ee72bd5b168fc89102ceac42 100644
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
-       n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+       n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                if (adev->rings[i])
                        n -= adev->rings[i]->ring_size;
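
The amdgpu_test.c change is purely mechanical: the old expression multiplied
a pool-block count of 16 by 64 KiB, and the new define folds that product
into a single byte size, so the total footprint is unchanged. A compile-time
check (illustrative, not part of the patch) confirms the arithmetic:

/* old: AMDGPU_IB_POOL_SIZE(=16) * 64 * 1024  ==  new: 1024 * 1024 bytes */
_Static_assert(16 * 64 * 1024 == 1024 * 1024,
	       "IB pool footprint unchanged at 1 MiB");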
index 1296499f0f54920699152fd75122db043a45b83f..ea0199a8f9c95b279ea03293451083fdd312a94f 100644
@@ -333,7 +333,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
        num_bytes = num_pages * 8;
 
        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-                                    AMDGPU_IB_POOL_NORMAL, &job);
+                                    AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;
 
@@ -2122,6 +2122,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush, bool tmz)
 {
+       enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+               AMDGPU_IB_POOL_DELAYED;
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
 
@@ -2139,8 +2141,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
-                       direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
        if (r)
                return r;
 
@@ -2229,7 +2230,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
        /* for IB padding */
        num_dw += 64;
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+                                    &job);
        if (r)
                return r;
 
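
The two amdgpu_ttm.c hunks above also show the recurring readability fix in
this patch: the pool-selecting ternary is hoisted out of the argument list
into a named local, so the choice is evaluated once and the allocation call
fits on a single line. In sketch form, with names taken from the hunk above:

	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
		AMDGPU_IB_POOL_DELAYED;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);

The same pattern appears in amdgpu_vm_sdma.c below, with
AMDGPU_IB_POOL_IMMEDIATE substituted for the direct case.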
index 550282d9c1fc7a9fd5382a29a1e6c7386ae37377..5100ebe8858d442af14f4bf1ef46cdd30eb8c5cd 100644
@@ -1056,8 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                        goto err;
        }
 
-       r = amdgpu_job_alloc_with_ib(adev, 64,
-                       direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+                                    AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                goto err;
 
index d090455282e57c01cd87aa9b8ea6843fa90c6f38..ecaa2d7483b20d19883b16f01e892941dc78e2b2 100644
@@ -447,7 +447,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                       AMDGPU_IB_POOL_DIRECT, &job);
+                                    AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -526,7 +526,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                       direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+                                    direct ? AMDGPU_IB_POOL_DIRECT :
+                                    AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;
 
index fbd451f3559ad752f84e81408c7a9fcc5339ea9a..b96c8d9a194650e7bc109d02895214580bc8539f 100644
@@ -61,11 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
                                  struct dma_resv *resv,
                                  enum amdgpu_sync_mode sync_mode)
 {
+       enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
+               AMDGPU_IB_POOL_DELAYED;
        unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
        int r;
 
-       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
-                       p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
+       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
        if (r)
                return r;
 
@@ -199,6 +200,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                                 uint64_t addr, unsigned count, uint32_t incr,
                                 uint64_t flags)
 {
+       enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
+               AMDGPU_IB_POOL_DELAYED;
        unsigned int i, ndw, nptes;
        uint64_t *pte;
        int r;
@@ -224,8 +227,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                        ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
                        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
 
-                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
-                                       p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
+                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+                                                    &p->job);
                        if (r)
                                return r;
 
index eff25c72c6c60900cc6b9b5a8f399298a321de6c..edaa50d850a6ade0ca3689ae195c868c71fa410a 100644
@@ -372,7 +372,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
         * translation. Avoid this by doing the invalidation from the SDMA
         * itself.
         */
-       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+                                    &job);
        if (r)
                goto error_alloc;