drm/amdgpu: Add amdgpu suspend-resume code path under SRIOV
author Bokun Zhang <Bokun.Zhang@amd.com>
Tue, 27 Sep 2022 16:30:04 +0000 (00:30 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 27 Sep 2022 22:03:36 +0000 (18:03 -0400)
- Under SRIOV, we need to send REQ_GPU_FINI to the hypervisor
  at suspend time. Furthermore, a VF cannot request a mode 1
  reset under SRIOV, so we skip the reset that would otherwise
  be triggered from the suspend_noirq() callback (see the
  sketch below).

- In the resume code path, we need to send REQ_GPU_INIT to the
  hypervisor and also resume the PSP IP block under SRIOV.
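
For illustration only (not part of the original commit message or of
the diff), the resulting flow on a SRIOV VF can be sketched as
follows. The function names are taken from the hunks below; as I read
the virt layer, amdgpu_virt_request_full_gpu(adev, init) is what ends
up sending REQ_GPU_FINI (init == false) or REQ_GPU_INIT (init == true)
to the hypervisor. Error handling and the unchanged, non-SRIOV steps
are elided:

    /* Condensed sketch of the suspend/resume flow on a SRIOV VF.
     * Not compilable as-is: unrelated steps are elided, see the
     * real hunks below for the complete functions.
     */
    int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
    {
            struct amdgpu_device *adev = drm_to_adev(dev);
            int r;

            if (amdgpu_sriov_vf(adev)) {
                    /* stop VF<->PF data exchange, then REQ_GPU_FINI */
                    amdgpu_virt_fini_data_exchange(adev);
                    r = amdgpu_virt_request_full_gpu(adev, false);
                    if (r)
                            return r;
            }

            /* ... evict resources, suspend IP blocks ... */

            if (amdgpu_sriov_vf(adev))
                    amdgpu_virt_release_full_gpu(adev, false);

            return 0;
    }

    int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
    {
            struct amdgpu_device *adev = drm_to_adev(dev);
            int r = 0;

            if (amdgpu_sriov_vf(adev)) {
                    /* REQ_GPU_INIT before touching the hardware */
                    r = amdgpu_virt_request_full_gpu(adev, true);
                    if (r)
                            return r;
            }

            r = amdgpu_device_ip_resume(adev); /* now also resumes PSP for VFs */

            if (amdgpu_sriov_vf(adev)) {
                    /* restart data exchange and release the GPU even if
                     * IP resume failed */
                    amdgpu_virt_init_data_exchange(adev);
                    amdgpu_virt_release_full_gpu(adev, true);
            }

            if (r)
                    return r;

            /* ... remaining resume work ... */
            return 0;
    }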

Signed-off-by: Bokun Zhang <Bokun.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 130060834b4e95b1f96b92d7e554bf16a4adcd67..48bd660ddb858eedd346d68aee57bc77bc7a04e4 100644 (file)
@@ -1050,6 +1050,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
 {
        if (adev->flags & AMD_IS_APU)
                return false;
+
+       if (amdgpu_sriov_vf(adev))
+               return false;
+
        return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
 }
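
Context (not part of this change): amdgpu_acpi_should_gpu_reset()
gates the ASIC reset performed in the noirq suspend phase, so
returning false for a VF is what actually skips the mode 1 reset.
The caller in amdgpu_drv.c looks roughly like this (approximate,
shown only for reference):

    static int amdgpu_pmops_suspend_noirq(struct device *dev)
    {
            struct drm_device *drm_dev = dev_get_drvdata(dev);
            struct amdgpu_device *adev = drm_to_adev(drm_dev);

            /* a VF now returns false here and falls through to 0,
             * skipping the mode 1 reset */
            if (amdgpu_acpi_should_gpu_reset(adev))
                    return amdgpu_asic_reset(adev);

            return 0;
    }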
 
index be7aff2d4a57efd7ba345a7b59526d4889dce88a..25e1f5ed7ead1476fd822561dc92d6677ce1ad1b 100644 (file)
@@ -3152,7 +3152,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
 
                        r = adev->ip_blocks[i].version->funcs->resume(adev);
                        if (r) {
@@ -4064,12 +4065,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
        struct amdgpu_device *adev = drm_to_adev(dev);
+       int r = 0;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
        adev->in_suspend = true;
 
+       if (amdgpu_sriov_vf(adev)) {
+               amdgpu_virt_fini_data_exchange(adev);
+               r = amdgpu_virt_request_full_gpu(adev, false);
+               if (r)
+                       return r;
+       }
+
        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
                DRM_WARN("smart shift update failed\n");
 
@@ -4093,6 +4102,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
        amdgpu_device_ip_suspend_phase2(adev);
 
+       if (amdgpu_sriov_vf(adev))
+               amdgpu_virt_release_full_gpu(adev, false);
+
        return 0;
 }
 
@@ -4111,6 +4123,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        struct amdgpu_device *adev = drm_to_adev(dev);
        int r = 0;
 
+       if (amdgpu_sriov_vf(adev)) {
+               r = amdgpu_virt_request_full_gpu(adev, true);
+               if (r)
+                       return r;
+       }
+
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
@@ -4125,6 +4143,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        }
 
        r = amdgpu_device_ip_resume(adev);
+
+       /* no matter what r is, always need to properly release full GPU */
+       if (amdgpu_sriov_vf(adev)) {
+               amdgpu_virt_init_data_exchange(adev);
+               amdgpu_virt_release_full_gpu(adev, true);
+       }
+
        if (r) {
                dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
                return r;