 		if (adev->asic_reset_res)
 			goto fail;
+
+		if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+			adev->mmhub.funcs->reset_ras_error_count(adev);
 	} else {
 		task_barrier_full(&hive->tb);
 	}
 }
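
For orientation: the hunk above lands in the BACO leg of the XGMI reset worker, after both BACO enter and exit have succeeded. reset_ras_error_count is an optional op on the per-IP mmhub function table, which is why both the table and the op are NULL-checked before the call. A minimal sketch of the table's shape, assuming the layout around this series (unrelated fields elided):

	/* Sketch, abridged from amdgpu_mmhub.h; the exact field set varies
	 * by kernel version. Only reset_ras_error_count matters here. */
	struct amdgpu_mmhub_funcs {
		void (*query_ras_error_count)(struct amdgpu_device *adev,
					      void *ras_error_status);
		/* Clears the hardware CE/UE counters so stale pre-reset
		 * error state cannot leak into post-reset RAS queries. */
		void (*reset_ras_error_count)(struct amdgpu_device *adev);
		/* ... */
	};
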
-	if (!r && amdgpu_ras_intr_triggered())
+	if (!r && amdgpu_ras_intr_triggered()) {
+		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+			if (tmp_adev->mmhub.funcs &&
+			    tmp_adev->mmhub.funcs->reset_ras_error_count)
+				tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+		}
+
 		amdgpu_ras_intr_cleared();
+	}
 
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		if (need_full_reset) {
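
The hive-wide loop repeats the same counter reset for every device sharing the XGMI link before amdgpu_ras_intr_cleared() drops the interrupt flag, so a RAS query issued right after recovery cannot pick up counts accumulated before the reset. The flag itself is a bare atomic; the helpers below paraphrase amdgpu_ras.h as it reads around this series (a sketch, not verbatim source):

	/* Paraphrased from amdgpu_ras.h: the RAS-interrupt gate is a single
	 * atomic flag, set from the RAS controller interrupt path and
	 * cleared by the post-reset cleanup above. */
	extern atomic_t amdgpu_ras_in_intr;

	static inline bool amdgpu_ras_intr_triggered(void)
	{
		return !!atomic_read(&amdgpu_ras_in_intr);
	}

	static inline void amdgpu_ras_intr_cleared(void)
	{
		atomic_set(&amdgpu_ras_in_intr, 0);
	}
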
 	struct ras_debug_if data;
 	int ret = 0;
 
+	if (amdgpu_ras_intr_triggered()) {
+		DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+		return size;
+	}
+
 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
 	if (ret)
 		return -EINVAL;
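
A note on `return size`: a debugfs .write that returns the full byte count reports success to userspace, so echo-driven injection scripts do not abort; the request is simply dropped, and the DRM_WARN in dmesg is the only breadcrumb. For the context lost with the hunk header, a trimmed sketch of the enclosing handler (name per amdgpu_ras.c, body abridged):

	static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
			const char __user *buf, size_t size, loff_t *pos)
	{
		struct ras_debug_if data;
		int ret = 0;

		/* Refuse, but "consume", injection requests while a fatal
		 * RAS interrupt is still being recovered. */
		if (amdgpu_ras_intr_triggered()) {
			DRM_WARN("RAS WARN: error injection currently inaccessible\n");
			return size;
		}

		ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
		if (ret)
			return -EINVAL;

		/* ... dispatch data.op (disable/enable/inject) ... */
		return size;
	}
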
 		.head = obj->head,
 	};
 
+	if (amdgpu_ras_intr_triggered())
+		return snprintf(buf, PAGE_SIZE,
+				"Query currently inaccessible\n");
+
 	if (amdgpu_ras_error_query(obj->adev, &info))
 		return -EINVAL;
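
This last hunk guards the RAS sysfs show handler, where buf is the PAGE_SIZE buffer sysfs supplies to .show callbacks, so a short snprintf message is the idiomatic way to answer "temporarily unavailable" without surfacing an error. A hedged reconstruction of the surrounding handler (names from amdgpu_ras.c; the normal-path formatting may differ by kernel version):

	static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
			struct device_attribute *attr, char *buf)
	{
		struct ras_manager *obj =
			container_of(attr, struct ras_manager, sysfs_attr);
		struct ras_query_if info = {
			.head = obj->head,
		};

		/* While RAS recovery is pending, report unavailability
		 * rather than touching hardware counters mid-reset. */
		if (amdgpu_ras_intr_triggered())
			return snprintf(buf, PAGE_SIZE,
					"Query currently inaccessible\n");

		if (amdgpu_ras_error_query(obj->adev, &info))
			return -EINVAL;

		/* Normal path: report uncorrectable/correctable counts. */
		return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
				"ue", info.ue_count,
				"ce", info.ce_count);
	}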