drm/amdkfd: Avoid ambiguity by indicating it's cp queue
author Yong Zhao <Yong.Zhao@amd.com>
Thu, 30 Jan 2020 23:35:23 +0000 (18:35 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 26 Feb 2020 19:20:05 +0000 (14:20 -0500)
The queues represented in queue_bitmap are only CP queues, so rename the bitmap to cp_queue_bitmap and get_queues_num() to get_cp_queues_num() accordingly, to make that explicit.
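
For reference, the renamed helper simply counts how many bits are set in cp_queue_bitmap, i.e. how many CP queues remain available for KFD; the kernel does this with bitmap_weight() over a KGD_MAX_QUEUES-sized DECLARE_BITMAP(). Below is a minimal, hypothetical userspace sketch of that computation; the KGD_MAX_QUEUES value and the example bitmap contents are assumptions for illustration only, not the kernel implementation.

/* Illustration only: userspace approximation of what get_cp_queues_num()
 * computes. The kernel uses DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES)
 * and bitmap_weight(); here the bitmap is modeled as an array of unsigned
 * longs and set bits are counted with __builtin_popcountl().
 */
#include <stdio.h>
#include <limits.h>

#define KGD_MAX_QUEUES	128	/* assumed value for this sketch */
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS	((KGD_MAX_QUEUES + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Count the CP queues marked available (bit == 1) for KFD. */
static unsigned int cp_queues_num(const unsigned long *cp_queue_bitmap)
{
	unsigned int i, count = 0;

	for (i = 0; i < BITMAP_LONGS; i++)
		count += __builtin_popcountl(cp_queue_bitmap[i]);
	return count;
}

int main(void)
{
	/* Hypothetical bitmap: queues 8..15 available for KFD. */
	unsigned long cp_queue_bitmap[BITMAP_LONGS] = { 0xff00UL };

	printf("CP queues available for KFD: %u\n",
	       cp_queues_num(cp_queue_bitmap));
	return 0;
}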

Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 2c5fa5e4d6f656710c6f1730c3c1871374c1892d..bc2e72a66db9f8fe316c74c1b6d9fc79c3c37264 100644
@@ -126,7 +126,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                /* this is going to have a few of the MSBs set that we need to
                 * clear
                 */
-               bitmap_complement(gpu_resources.queue_bitmap,
+               bitmap_complement(gpu_resources.cp_queue_bitmap,
                                  adev->gfx.mec.queue_bitmap,
                                  KGD_MAX_QUEUES);
 
@@ -137,7 +137,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                                * adev->gfx.mec.num_pipe_per_mec
                                * adev->gfx.mec.num_queue_per_pipe;
                for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
-                       clear_bit(i, gpu_resources.queue_bitmap);
+                       clear_bit(i, gpu_resources.cp_queue_bitmap);
 
                amdgpu_doorbell_get_kfd_info(adev,
                                &gpu_resources.doorbell_physical_address,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 7ef9b89f5c70c59c3758c1ffe14e493497eb119b..973581c2b40190d68fedd85d706b53967fe8a9a9 100644
@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
        /* queue is available for KFD usage if bit is 1 */
        for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
                if (test_bit(pipe_offset + i,
-                             dqm->dev->shared_resources.queue_bitmap))
+                             dqm->dev->shared_resources.cp_queue_bitmap))
                        return true;
        return false;
 }
 
-unsigned int get_queues_num(struct device_queue_manager *dqm)
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
 {
-       return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+       return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
                                KGD_MAX_QUEUES);
 }
 
@@ -908,7 +908,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 
                for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
                        if (test_bit(pipe_offset + queue,
-                                    dqm->dev->shared_resources.queue_bitmap))
+                                    dqm->dev->shared_resources.cp_queue_bitmap))
                                dqm->allocated_queues[pipe] |= 1 << queue;
        }
 
@@ -1029,7 +1029,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
                mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
                        / dqm->dev->shared_resources.num_pipe_per_mec;
 
-               if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+               if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
                        continue;
 
                /* only acquire queues from the first MEC */
@@ -1979,7 +1979,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
 
                for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
                        if (!test_bit(pipe_offset + queue,
-                                     dqm->dev->shared_resources.queue_bitmap))
+                                     dqm->dev->shared_resources.cp_queue_bitmap))
                                continue;
 
                        r = dqm->dev->kfd2kgd->hqd_dump(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ee3400e92c3049c3afeb956af3e9907bc5c20435..3f0fb0d28c019eadd9d170b56c3f48d41e290f4a 100644
@@ -219,7 +219,7 @@ void device_queue_manager_init_v10_navi10(
                struct device_queue_manager_asic_ops *asic_ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd);
-unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 393c218734fd236748b23638dfe8b17473a4c56f..377bde0e781cd7368b56b7a5667d571c8f95defb 100644
@@ -62,7 +62,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
                max_proc_per_quantum = dev->max_proc_per_quantum;
 
        if ((process_count > max_proc_per_quantum) ||
-           compute_queue_count > get_queues_num(pm->dqm)) {
+           compute_queue_count > get_cp_queues_num(pm->dqm)) {
                *over_subscription = true;
                pr_debug("Over subscribed runlist\n");
        }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 9e2aec5fad86b6a0dffc6d84a63e32aba84467d8..cfd3aa1e30e3c91c9c4a261c857e29d99c20ce19 100644
@@ -266,7 +266,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                if ((dev->dqm->sched_policy ==
                     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
                ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
-               (dev->dqm->active_queue_count >= get_queues_num(dev->dqm)))) {
+               (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
                        pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
                        retval = -EPERM;
                        goto err_create_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 034655544122cfdf3ca04f608a2cd41b041cea95..5303877c081a080e94940cdc95372535fd0a6b4f 100644
@@ -1320,7 +1320,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
        dev->node_props.num_gws = (hws_gws_support &&
                dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
                amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
-       dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
+       dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
        dev->node_props.unique_id = gpu->unique_id;
 
        kfd_fill_mem_clk_max_info(dev);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 6910ff732b7c5a54dd43849c805f8e704d671a7e..abc0eb4ac49360abd4b94f82cd0787976d7cf9c0 100644
@@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
        uint32_t num_queue_per_pipe;
 
        /* Bit n == 1 means Queue n is available for KFD */
-       DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
+       DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
 
        /* SDMA doorbell assignments (SOC15 and later chips only). Only
         * specific doorbells are routed to each SDMA engine. Others