__func__, dst, src, rem))
BUG();
}
-
-static inline void arch_invalidate_pmem(void *addr, size_t size)
-{
- clflush_cache_range(addr, size);
-}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
+void arch_invalidate_pmem(void *addr, size_t size)
+{
+ clflush_cache_range(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+
static void __cpa_flush_all(void *arg)
{
unsigned long cache = (unsigned long)arg;
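With clflush_cache_range() wrapped and exported here, arch_invalidate_pmem() becomes callable from modular nvdimm code instead of living as a static inline in asm/pmem.h. A minimal caller sketch, assuming a mapped pmem range whose poison has just been cleared (the helper name below is hypothetical, not part of this patch):

/* Hypothetical helper: after the platform clears poison in a pmem
 * range, flush any stale cache lines so subsequent reads fetch the
 * now-clean media contents rather than cached poison.
 */
static void example_poison_cleared(void *virt_addr, size_t len)
{
	arch_invalidate_pmem(virt_addr, len);	/* clflush_cache_range() on x86 */
}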
#include <linux/sizes.h>
#include <linux/pmem.h>
#include "nd-core.h"
+#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"
cleared /= 512;
badblocks_clear(&nsio->bb, sector, cleared);
}
- invalidate_pmem(nsio->addr + offset, size);
+ arch_invalidate_pmem(nsio->addr + offset, size);
} else
rc = -EIO;
}
badblocks_clear(&pmem->bb, sector, cleared);
}
- invalidate_pmem(pmem->virt_addr + offset, len);
+ arch_invalidate_pmem(pmem->virt_addr + offset, len);
return rc;
}
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
+void arch_invalidate_pmem(void *addr, size_t size);
#else
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
}
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+}
#endif
/* this definition is in its own header for tools/testing/nvdimm to consume */
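The split declaration keeps call sites free of #ifdefs: with CONFIG_ARCH_HAS_PMEM_API set the arch-provided symbol is used, otherwise the empty static inline compiles the call away. A standalone illustration of the same idiom, with made-up names standing in for the kernel config symbol and hook:

#include <stddef.h>

#ifdef HAVE_CACHE_INVALIDATE			/* stand-in for CONFIG_ARCH_HAS_PMEM_API */
void cache_invalidate(void *addr, size_t size);	/* provided by arch code */
#else
static inline void cache_invalidate(void *addr, size_t size)
{
}
#endif

static void clear_range(void *addr, size_t size)
{
	/* ... clear the range by whatever mechanism ... */
	cache_invalidate(addr, size);	/* compiles to a no-op when the hook is absent */
}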
{
BUG();
}
-
-static inline void arch_invalidate_pmem(void *addr, size_t size)
-{
- BUG();
-}
#endif
static inline bool arch_has_pmem_api(void)
else
memcpy(dst, src, n);
}
-
-/**
- * invalidate_pmem - flush a pmem range from the cache hierarchy
- * @addr: virtual start address
- * @size: bytes to invalidate (internally aligned to cache line size)
- *
- * For platforms that support clearing poison this flushes any poisoned
- * ranges out of the cache
- */
-static inline void invalidate_pmem(void *addr, size_t size)
-{
- if (arch_has_pmem_api())
- arch_invalidate_pmem(addr, size);
-}
#endif /* __PMEM_H__ */
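With the invalidate_pmem() wrapper removed, the arch_has_pmem_api() guard no longer sits in front of the flush; call sites invoke arch_invalidate_pmem() directly, and the empty stub added to drivers/nvdimm/pmem.h covers builds without CONFIG_ARCH_HAS_PMEM_API. A before/after sketch of a call site, paraphrasing the hunks above:

	/* before: gated through the generic wrapper in linux/pmem.h, which
	 * expanded to: if (arch_has_pmem_api()) arch_invalidate_pmem(...);
	 */
	invalidate_pmem(pmem->virt_addr + offset, len);

	/* after: call the arch hook directly; the !CONFIG_ARCH_HAS_PMEM_API
	 * stub makes it a no-op where no cache-flush instruction exists
	 */
	arch_invalidate_pmem(pmem->virt_addr + offset, len);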