return gaudi->events_stat;
}
-static void gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
u32 flags)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
-
- mutex_lock(&hdev->mmu_cache_lock);
+ return 0;
if (hdev->pldm)
timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+ mutex_lock(&hdev->mmu_cache_lock);
+
/* L0 & L1 invalidation */
WREG32(mmSTLB_INV_PS, 2);
WREG32(mmSTLB_INV_SET, 0);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
-
mutex_unlock(&hdev->mmu_cache_lock);
+
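+ /* Invalidation timed out: report it and escalate to a device hard reset */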
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
-static void gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
+static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
bool is_hard, u32 asid, u64 va, u64 size)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
+ return 0;
mutex_lock(&hdev->mmu_cache_lock);
1000,
timeout_usec);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
-
mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev,
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
-static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
+ return 0;
/* no need in L1 only invalidation in Goya */
if (!is_hard)
- return;
+ return 0;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
mutex_unlock(&hdev->mmu_cache_lock);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
-static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
- bool is_hard, u32 asid, u64 va, u64 size)
+static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
+ bool is_hard, u32 asid, u64 va, u64 size)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec, inv_data, pi;
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
+ return 0;
/* no need in L1 only invalidation in Goya */
if (!is_hard)
- return;
+ return 0;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
mutex_unlock(&hdev->mmu_cache_lock);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
int goya_send_heartbeat(struct hl_device *hdev)
u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
- void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
+ int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
u32 flags);
- void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
+ int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
void (*enable_clock_gating)(struct hl_device *hdev);
* with huge pages.
* @dram_va_range: holds available virtual addresses for DRAM mappings.
* @mem_hash_lock: protects the mem_hash.
- * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifing the
- * MMU hash or walking the PGT requires talking this lock
+ * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
+ *            MMU hash or walking the PGT requires taking this lock.
* @debugfs_list: node in debugfs list of contexts.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
vm_type = (enum vm_type_t *) userptr;
hint_addr = args->map_host.hint_addr;
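+ /* keep the handle so the error message below can identify the mapping */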
+ handle = phys_pg_pack->handle;
} else {
handle = lower_32_bits(args->map_device.handle);
goto map_err;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
mutex_unlock(&ctx->mmu_lock);
+ if (rc) {
+ dev_err(hdev->dev,
+ "mapping handle %u failed due to MMU cache invalidation\n",
+ handle);
+ goto map_err;
+ }
+
ret_vaddr += phys_pg_pack->offset;
hnode->ptr = vm_type;
* at the loop end rather than for each iteration
*/
if (!ctx_free)
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ *vm_type);
mutex_unlock(&ctx->mmu_lock);
/*
- * No point in maintaining the free VA block list if the context is
- * closing as the list will be freed anyway
+ * If the context is closing, skip the MMU cache invalidation return
+ * code check and the VA free list update: in that flow the MMU cache
+ * is invalidated outside of this unmap function and the VA free list
+ * is freed anyway.
*/
if (!ctx_free) {
- rc = add_va_block(hdev, va_range, vaddr,
- vaddr + phys_pg_pack->total_size - 1);
+ int tmp_rc;
+
if (rc)
+ dev_err(hdev->dev,
+ "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
+ vaddr);
+
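+ /* free the VA block regardless; report its failure only if invalidation succeeded */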
+ tmp_rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (tmp_rc) {
dev_warn(hdev->dev,
"add va block failed for vaddr: 0x%llx\n",
vaddr);
+ if (!rc)
+ rc = tmp_rc;
+ }
}
atomic_dec(&phys_pg_pack->mapping_cnt);
dma_unmap_host_va(hdev, userptr);
}
- return 0;
+ return rc;
mapping_cnt_err:
if (is_userptr)