git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/sched: implement and export drm_sched_pick_best
author: Nirmoy Das <nirmoy.das@amd.com>
Fri, 13 Mar 2020 10:39:27 +0000 (11:39 +0100)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 16 Mar 2020 20:21:32 +0000 (16:21 -0400)
Remove drm_sched_entity_get_free_sched() and use the logic of picking
the least loaded drm scheduler from a drm scheduler list to implement
drm_sched_pick_best(). This patch also exports drm_sched_pick_best() so
that it can be utilized by other drm drivers.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h

index d631521a96798e1a20a9ee0f30da0ad44a656a14..c803e14eed91a867d63f4b3015992f7a08261f83 100644 (file)
@@ -137,38 +137,6 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
        return true;
 }
 
-/**
- * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
- *
- * @entity: scheduler entity
- *
- * Return the pointer to the rq with least load.
- */
-static struct drm_sched_rq *
-drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
-{
-       struct drm_sched_rq *rq = NULL;
-       unsigned int min_jobs = UINT_MAX, num_jobs;
-       int i;
-
-       for (i = 0; i < entity->num_sched_list; ++i) {
-               struct drm_gpu_scheduler *sched = entity->sched_list[i];
-
-               if (!entity->sched_list[i]->ready) {
-                       DRM_WARN("sched%s is not ready, skipping", sched->name);
-                       continue;
-               }
-
-               num_jobs = atomic_read(&sched->num_jobs);
-               if (num_jobs < min_jobs) {
-                       min_jobs = num_jobs;
-                       rq = &entity->sched_list[i]->sched_rq[entity->priority];
-               }
-       }
-
-       return rq;
-}
-
 /**
  * drm_sched_entity_flush - Flush a context entity
  *
@@ -479,6 +447,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 {
        struct dma_fence *fence;
+       struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;
 
        if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
@@ -489,7 +458,8 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
                return;
 
        spin_lock(&entity->rq_lock);
-       rq = drm_sched_entity_get_free_sched(entity);
+       sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+       rq = sched ? &sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
index f4ac38b435f73744e41a29c563e1c4506ac9d21f..a18eabf692e4475d408b2d162ba186f2f1043b79 100644 (file)
@@ -696,6 +696,42 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
        return job;
 }
 
+/**
+ * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns pointer of the sched with the least load or NULL if none of the
+ * drm_gpu_schedulers are ready
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+                    unsigned int num_sched_list)
+{
+       struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+       int i;
+       unsigned int min_jobs = UINT_MAX, num_jobs;
+
+       for (i = 0; i < num_sched_list; ++i) {
+               sched = sched_list[i];
+
+               if (!sched->ready) {
+                       DRM_WARN("scheduler %s is not ready, skipping",
+                                sched->name);
+                       continue;
+               }
+
+               num_jobs = atomic_read(&sched->num_jobs);
+               if (num_jobs < min_jobs) {
+                       min_jobs = num_jobs;
+                       picked_sched = sched;
+               }
+       }
+
+       return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
+
 /**
  * drm_sched_blocked - check if the scheduler is blocked
  *
index ae39eacee2508ddd9a3e6d6e512ec4512b12d48d..26b04ff62676612aee4a507255710db4177853ba 100644 (file)
@@ -341,5 +341,8 @@ void drm_sched_fence_finished(struct drm_sched_fence *fence);
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                                unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+                    unsigned int num_sched_list);
 
 #endif