}
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
- unsigned long address, unsigned long pages, int ih, int gl)
+ unsigned long address, unsigned long pages, int ih)
{
struct qi_desc desc;
- if (pages == -1) {
- /* For global kernel pages we have to flush them in *all* PASIDs
- * because that's the only option the hardware gives us. Despite
- * the fact that they are actually only accessible through one. */
- if (gl)
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
- QI_EIOTLB_TYPE;
- else
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
- QI_EIOTLB_TYPE;
+ /*
+ * Use PASID-granularity IOTLB invalidation when the whole address
+ * space is being flushed (pages == -1) or the IOMMU does not support
+ * page-selective invalidation.
+ */
+ if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+ QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
desc.qw1 = 0;
} else {
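/*
* Page-selective-within-PASID invalidation: the page count is rounded
* up to a power of two and encoded in the descriptor's address-mask
* (AM) field.
*/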
int mask = ilog2(__roundup_pow_of_two(pages));
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
QI_EIOTLB_DID(sdev->did) |
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
QI_EIOTLB_TYPE;
desc.qw1 = QI_EIOTLB_ADDR(address) |
- QI_EIOTLB_GL(gl) |
QI_EIOTLB_IH(ih) |
QI_EIOTLB_AM(mask);
}
}
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
- unsigned long pages, int ih, int gl)
+ unsigned long pages, int ih)
{
struct intel_svm_dev *sdev;
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+ intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
rcu_read_unlock();
}
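/*
* MMU notifier invalidate_range callback: flush the affected virtual
* address range for every device bound to this SVM.
*/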
static void intel_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
intel_flush_svm_range(svm, start,
- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
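/*
* MMU notifier release callback: the address space is going away, so
* tear down the PASID entry and flush the IOTLB for each attached device.
*/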
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
struct intel_svm_dev *sdev;
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
}
rcu_read_unlock();
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like. */
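/* In intel_svm_unbind_mm(): release this device's binding to the PASID */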
intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {