return 0;
}
+static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring || !ring->sched.thread)
+			continue;
+
+		cancel_delayed_work_sync(&ring->sched.work_tdr);
+	}
+}
+
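cancel_delayed_work_sync() is the right primitive here because the scheduler's TDR is a single delayed_work per ring: the call removes a pending timeout and also waits out one that has already started running. For orientation, a sketch of how drm_sched arms that work item in this generation of the scheduler (assumed from sched_main.c of the same era, not part of this diff):

	#include <drm/gpu_scheduler.h>

	/* Each drm_gpu_scheduler owns one delayed work item, work_tdr,
	 * re-armed whenever unfinished jobs sit on the ring mirror list.
	 * Cancelling it synchronously therefore quiesces all TDR activity
	 * for that ring.
	 */
	static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
	{
		if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
		    !list_empty(&sched->ring_mirror_list))
			schedule_delayed_work(&sched->work_tdr, sched->timeout);
	}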
/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
+	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
-	case pci_channel_io_frozen:
-		/* Fatal error, prepare for slot reset */
-		amdgpu_device_lock_adev(adev);
+	/* Fatal error, prepare for slot reset */
+	case pci_channel_io_frozen:
+		/*
+		 * If we fail to set adev->in_gpu_reset in
+		 * amdgpu_device_lock_adev, a TDR is already in progress:
+		 * cancel and wait for it, then retry until we own the reset.
+		 *
+		 * Holding adev->reset_sem prevents any external access to
+		 * the GPU for the duration of the PCI error recovery.
+		 */
+		while (!amdgpu_device_lock_adev(adev, NULL))
+			amdgpu_cancel_all_tdr(adev);
+
+		/*
+		 * Block all job scheduling, as we do for a regular GPU
+		 * reset, for the duration of the recovery.
+		 */
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			struct amdgpu_ring *ring = adev->rings[i];
+
+			if (!ring || !ring->sched.thread)
+				continue;
+
+			drm_sched_stop(&ring->sched, NULL);
+		}
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
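The while loop above relies on amdgpu_device_lock_adev() returning false when a TDR already owns the device. A condensed sketch of that helper as it stands after the reset-locking rework this patch builds on (reset bookkeeping trimmed; treat the exact body as an assumption, not part of this diff):

	static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
					    struct amdgpu_hive_info *hive)
	{
		/* Lost the race: a TDR or another reset owns the device */
		if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
			return false;

		/* Block external access to the GPU until unlock */
		if (hive)
			down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
		else
			down_write(&adev->reset_sem);

		return true;
	}

A false return means a timeout handler holds adev->in_gpu_reset, and amdgpu_cancel_all_tdr() flushes every ring's work_tdr before the next attempt, so the loop cannot spin against a handler that never exits.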
void amdgpu_pci_error_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
+	int i;

-	amdgpu_device_unlock_adev(adev);
	DRM_INFO("PCI error: resume callback!!\n");
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring || !ring->sched.thread)
+			continue;
+
+		drm_sched_resubmit_jobs(&ring->sched);
+		drm_sched_start(&ring->sched, true);
+	}
+
+	amdgpu_device_unlock_adev(adev);
}
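For completeness, the detected/resume pair only takes effect once it is plugged into the PCI core through the driver's err_handler table. A sketch of the hookup on the amdgpu_drv.c side (amdgpu_pci_slot_reset and the surrounding field layout are assumed from the rest of this series):

	static struct pci_error_handlers amdgpu_pci_err_handler = {
		.error_detected	= amdgpu_pci_error_detected,
		.slot_reset	= amdgpu_pci_slot_reset,	/* assumed name */
		.resume		= amdgpu_pci_error_resume,
	};

	static struct pci_driver amdgpu_kms_pci_driver = {
		.name		= DRIVER_NAME,
		/* id_table, probe, remove, etc. omitted */
		.err_handler	= &amdgpu_pci_err_handler,
	};

Note the ordering in the resume callback: jobs are resubmitted and the schedulers restarted while adev->reset_sem is still held, and amdgpu_device_unlock_adev() runs last, so no external work can slip in between recovery and restart.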