git.baikalelectronics.ru Git - kernel.git/commitdiff
x86/paravirt: Fix bool return type for PVOP_CALL()
authorPeter Zijlstra <peterz@infradead.org>
Thu, 8 Dec 2016 15:42:15 +0000 (16:42 +0100)
committerIngo Molnar <mingo@kernel.org>
Sun, 11 Dec 2016 12:09:20 +0000 (13:09 +0100)
Commit:

  d625bcd6b096 ("x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()")

introduced a paravirt op with bool return type [*].

It turns out that the PVOP_CALL*() macros miscompile when rettype is
bool. Code that looked like:

   83 ef 01                sub    $0x1,%edi
   ff 15 32 a0 d8 00       callq  *0xd8a032(%rip)        # ffffffff81e28120 <pv_lock_ops+0x20>
   84 c0                   test   %al,%al

ended up looking like so after PVOP_CALL1() was applied:

   83 ef 01                sub    $0x1,%edi
   48 63 ff                movslq %edi,%rdi
   ff 14 25 20 81 e2 81    callq  *0xffffffff81e28120
   48 85 c0                test   %rax,%rax

Note how it tests the whole of %rax, even though a typical bool return
function only sets %al, like:

  0f 95 c0                setne  %al
  c3                      retq

This is because ____PVOP_CALL() does:

__ret = (rettype)__eax;

and while regular integer type casts truncate the result, a cast to
bool tests for any !0 value. Fix this by explicitly truncating to
sizeof(rettype) before casting.

[*] The actual bug should've been exposed in commit:
      458397010062 ("locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests")
    but that didn't properly implement the paravirt call.

Reported-by: kernel test robot <xiaolong.ye@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d625bcd6b096 ("x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()")
Link: http://lkml.kernel.org/r/20161208154349.346057680@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/paravirt_types.h

index 2614bd7a7839340a0d891683b32e1338662588d3..3f2bc0f0d3e89f2ced6a75b699410519eeab3177 100644 (file)
@@ -510,6 +510,18 @@ int paravirt_disable_iospace(void);
 #define PVOP_TEST_NULL(op)     ((void)op)
 #endif
 
+#define PVOP_RETMASK(rettype)                                          \
+       ({      unsigned long __mask = ~0UL;                            \
+               switch (sizeof(rettype)) {                              \
+               case 1: __mask =       0xffUL; break;                   \
+               case 2: __mask =     0xffffUL; break;                   \
+               case 4: __mask = 0xffffffffUL; break;                   \
+               default: break;                                         \
+               }                                                       \
+               __mask;                                                 \
+       })
+
+
 #define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,                \
                      pre, post, ...)                                   \
        ({                                                              \
@@ -537,7 +549,7 @@ int paravirt_disable_iospace(void);
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
-                       __ret = (rettype)__eax;                         \
+                       __ret = (rettype)(__eax & PVOP_RETMASK(rettype));       \
                }                                                       \
                __ret;                                                  \
        })