From e3f5ea7787827ea2cade23b870c3403b52393ce8 Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Tue, 9 May 2017 18:00:43 +0100
Subject: [PATCH] x86, pmem: Fix cache flushing for iovec write < 8 bytes

Commit 48b4791b30a6 added cache flushing for unaligned writes from an
iovec, covering the first and last cache line of a >= 8 byte write and
the first cache line of a < 8 byte write.

But an unaligned write of 2-7 bytes can still cover two cache lines, so
make sure we flush both in that case.

Cc:
Fixes: 48b4791b30a6 ("x86, pmem: fix broken __copy_user_nocache ...")
Signed-off-by: Ben Hutchings
Signed-off-by: Dan Williams
---
 arch/x86/include/asm/pmem.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d5a22bac99880..0ff8fe71b2554 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -98,7 +98,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 	if (bytes < 8) {
 		if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-			arch_wb_cache_pmem(addr, 1);
+			arch_wb_cache_pmem(addr, bytes);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
-- 
2.39.5
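
Note (not part of the patch): the following is a minimal user-space sketch
illustrating why an unaligned write of 2-7 bytes can straddle two cache
lines, and why flushing a length of "bytes" from the start address covers
both lines while flushing a single byte does not. CACHE_LINE_SIZE and
lines_to_flush() are invented for this illustration; in the kernel the
actual writeback is done by arch_wb_cache_pmem(), which operates at cache
line granularity.

/* Illustrative sketch only; assumes a 64-byte cache line. */
#include <stdio.h>

#define CACHE_LINE_SIZE 64UL

/* Number of cache lines covered by the byte range [addr, addr + len). */
static unsigned long lines_to_flush(unsigned long addr, unsigned long len)
{
	unsigned long first = addr & ~(CACHE_LINE_SIZE - 1);
	unsigned long last = (addr + len - 1) & ~(CACHE_LINE_SIZE - 1);

	return (last - first) / CACHE_LINE_SIZE + 1;
}

int main(void)
{
	/* A 4-byte unaligned write starting 2 bytes before a line boundary. */
	unsigned long dest = 0x1000 - 2;
	unsigned long bytes = 4;

	/* Old behaviour: flush length 1 reaches only the first cache line. */
	printf("flush length 1:     %lu line(s)\n", lines_to_flush(dest, 1));
	/* Fixed behaviour: flush length "bytes" reaches both cache lines. */
	printf("flush length bytes: %lu line(s)\n", lines_to_flush(dest, bytes));
	return 0;
}

With these example values the first call reports 1 line and the second
reports 2, which is the case the patch addresses.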