vm->clear_range(vm, vma_res->start, vma_res->vma_size);
if (vma_res->tlb)
- vma_invalidate_tlb(vm, *vma_res->tlb);
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
return err;
}
-void vma_invalidate_tlb(struct i915_address_space *vm, u32 tlb)
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
/*
 * Before we release the pages that were bound by this vma, we
 * must invalidate any TLBs that may still have a reference
 * back to our physical address. It only needs to be done once,
 * so after updating the PTE to point away from the pages, record
 * the most recent TLB invalidation seqno, and if we have not yet
 * flushed the TLBs upon release, perform a full invalidation.
*/
- WRITE_ONCE(tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+ WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
}
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
dma_fence_put(unbind_fence);
unbind_fence = NULL;
}
- vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
+ vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
}
/*
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
-void vma_invalidate_tlb(struct i915_address_space *vm, u32 tlb);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
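The functional change above is that vma_invalidate_tlb() previously received the TLB slot by value, so the WRITE_ONCE() recorded the invalidation seqno only into the function's local copy of the argument; the caller's vma->obj->mm.tlb (or the slot behind vma_res->tlb) was never updated. Taking a u32 * lets the seqno be published into the caller's storage, which is why the call sites now pass &vma->obj->mm.tlb or the vma_res->tlb pointer directly. The standalone sketch below illustrates only that by-value/by-pointer difference; it is not i915 code, and next_seqno(), record_by_value() and record_by_ptr() are hypothetical stand-ins for the driver helpers, with plain assignments standing in for WRITE_ONCE().

#include <stdio.h>

/* Hypothetical stand-in for intel_gt_next_invalidate_tlb_full(). */
static unsigned int next_seqno(void)
{
	return 42;
}

/* Broken pattern: the write lands in the local copy of 'tlb' and is lost. */
static void record_by_value(unsigned int tlb)
{
	tlb = next_seqno();
}

/* Fixed pattern: the write lands in the caller's slot. */
static void record_by_ptr(unsigned int *tlb)
{
	*tlb = next_seqno();
}

int main(void)
{
	unsigned int slot = 0;

	record_by_value(slot);
	printf("by value:   %u\n", slot);	/* prints 0: the seqno never reached the slot */

	record_by_ptr(&slot);
	printf("by pointer: %u\n", slot);	/* prints 42: the seqno was recorded */
	return 0;
}

With the pointer form, the seqno stored in the caller's slot can later be consulted to decide whether a full TLB invalidation is still required before the pages are released, as the comment in vma_invalidate_tlb() describes.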