}
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, int target_node, unsigned int align,
- unsigned long long pfn_flags)
+ struct resource *res, int target_node, unsigned int align)
{
struct dax_region *dax_region;
dev_set_drvdata(parent, dax_region);
memcpy(&dax_region->res, res, sizeof(*res));
- dax_region->pfn_flags = pfn_flags;
kref_init(&dax_region->kref);
dax_region->id = region_id;
dax_region->align = align;
struct dax_region;
void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, int target_node, unsigned int align,
- unsigned long long flags);
+ struct resource *res, int target_node, unsigned int align);
enum dev_dax_subsys {
DEV_DAX_BUS,
* @dev: parent device backing this region
* @align: allocation and mapping alignment for child dax devices
* @res: physical address range of the region
- * @pfn_flags: identify whether the pfns are paged back or not
*/
struct dax_region {
int id;
struct device *dev;
unsigned int align;
struct resource res;
- unsigned long long pfn_flags;
};
/**
return -EINVAL;
}
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
- && (vma->vm_flags & VM_DONTCOPY) == 0) {
- dev_info_ratelimited(dev,
- "%s: %s: fail, dax range requires MADV_DONTFORK\n",
- current->comm, func);
- return -EINVAL;
- }
-
if (!vma_is_dax(vma)) {
dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}
return VM_FAULT_SIGBUS;
}
- /* dax pmd mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "region lacks devmap flags\n");
- return VM_FAULT_SIGBUS;
- }
-
if (fault_size < dax_region->align)
return VM_FAULT_SIGBUS;
else if (fault_size > dax_region->align)
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
return VM_FAULT_SIGBUS;
}
- /* dax pud mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "region lacks devmap flags\n");
- return VM_FAULT_SIGBUS;
- }
-
if (fault_size < dax_region->align)
return VM_FAULT_SIGBUS;
else if (fault_size > dax_region->align)
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
memcpy(&pgmap.res, res, sizeof(*res));
dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
- PMD_SIZE, PFN_DEV|PFN_MAP);
+ PMD_SIZE);
if (!dax_region)
return -ENOMEM;
memcpy(&res, &pgmap.res, sizeof(res));
res.start += offset;
dax_region = alloc_dax_region(dev, region_id, &res,
- nd_region->target_node, le32_to_cpu(pfn_sb->align),
- PFN_DEV|PFN_MAP);
+ nd_region->target_node, le32_to_cpu(pfn_sb->align));
if (!dax_region)
return ERR_PTR(-ENOMEM);
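
For reference, a minimal sketch of a caller after this change: a hypothetical platform driver probe allocating a dax_region with the new five-argument alloc_dax_region() signature. Only alloc_dax_region() and its argument list come from the patch above; the probe function name, the use of platform resource 0, and the NUMA_NO_NODE/PMD_SIZE choices are illustrative assumptions, and the snippet assumes it is built inside drivers/dax/ where bus.h declares alloc_dax_region().

#include <linux/platform_device.h>
#include "bus.h"	/* in-tree drivers/dax header declaring alloc_dax_region() */

/* Hypothetical probe, for illustration only -- not part of the patch. */
static int example_dax_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dax_region *dax_region;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOMEM;

	/* pfn_flags argument is gone; only the region geometry is passed */
	dax_region = alloc_dax_region(dev, pdev->id, res, NUMA_NO_NODE,
			PMD_SIZE);
	if (!dax_region)
		return -ENOMEM;

	return 0;
}

With pfn_flags no longer stored per region, the fault handlers above simply hardcode PFN_DEV|PFN_MAP when building the pfn_t, as every remaining caller passed exactly those flags.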