]> git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/amdgpu:fix gpu recover missing skipping(v2)
authorMonk Liu <Monk.Liu@amd.com>
Wed, 8 Nov 2017 06:35:04 +0000 (14:35 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 4 Dec 2017 21:41:46 +0000 (16:41 -0500)
If the app closes the CTX right after IB submit, GPU recovery
will fail to find the entity behind this guilty
job, thus leading to no job skipping for this guilty job.

To fix this corner case, just move the increment of
job->karma out of the entity iteration.

v2:
only do the karma increment if bad->s_priority != KERNEL,
because we always consider KERNEL jobs to be correct and always
want to recover an unfinished kernel job (sometimes a kernel
job is interrupted by VF FLR or another GPU hang event)

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-By: Xiangliang Yu <Xiangliang.Yu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

index 941b5920b97b0b4bc794f1a8b8f8d03f5f1aa14a..53ea7e12d219dbed6652d960dda1b0cf8d03a2a7 100644 (file)
@@ -463,7 +463,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
        }
        spin_unlock(&sched->job_list_lock);
 
-       if (bad) {
+       if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) {
+               atomic_inc(&bad->karma);
                /* don't increase @bad's karma if it's from KERNEL RQ,
                 * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs)
                 * corrupt but keep in mind that kernel jobs always considered good.
@@ -474,7 +475,7 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
                        spin_lock(&rq->lock);
                        list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
                                if (bad->s_fence->scheduled.context == entity->fence_context) {
-                                   if (atomic_inc_return(&bad->karma) > bad->sched->hang_limit)
+                                   if (atomic_read(&bad->karma) > bad->sched->hang_limit)
                                                if (entity->guilty)
                                                        atomic_set(entity->guilty, 1);
                                        break;