git.baikalelectronics.ru Git - kernel.git/commitdiff
x86/mm/32: Add support for 64-bit __get_user() on 32-bit kernels
authorBenjamin LaHaise <bcrl@kvack.org>
Wed, 9 Mar 2016 20:05:56 +0000 (15:05 -0500)
committerIngo Molnar <mingo@kernel.org>
Wed, 13 Apr 2016 11:01:03 +0000 (13:01 +0200)
The existing __get_user() implementation does not support fetching
64-bit values on 32-bit x86.  Implement this in a way that does not
generate any incorrect warnings as cautioned by Russell King.

Test code available at:

  http://www.kvack.org/~bcrl/x86_32-get_user.tar .

Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/uaccess.h

index a969ae607be8323578865285b27cb443451b4483..8b3fb76b489b8d2df25c7a9730e9a360bd183b3d 100644 (file)
@@ -333,7 +333,26 @@ do {                                                                       \
 } while (0)
 
 #ifdef CONFIG_X86_32
-#define __get_user_asm_u64(x, ptr, retval, errret)     (x) = __get_user_bad()
+#define __get_user_asm_u64(x, ptr, retval, errret)                     \
+({                                                                     \
+       __typeof__(ptr) __ptr = (ptr);                                  \
+       asm volatile(ASM_STAC "\n"                                      \
+                    "1:        movl %2,%%eax\n"                        \
+                    "2:        movl %3,%%edx\n"                        \
+                    "3: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "4:        mov %4,%0\n"                            \
+                    "  xorl %%eax,%%eax\n"                             \
+                    "  xorl %%edx,%%edx\n"                             \
+                    "  jmp 3b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 4b)                               \
+                    _ASM_EXTABLE(2b, 4b)                               \
+                    : "=r" (retval), "=A"(x)                           \
+                    : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
+                      "i" (errret), "0" (retval));                     \
+})
+
 #define __get_user_asm_ex_u64(x, ptr)                  (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
@@ -420,7 +439,7 @@ do {                                                                        \
 #define __get_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
        int __gu_err;                                                   \
-       unsigned long __gu_val;                                         \
+       __inttype(*(ptr)) __gu_val;                                     \
        __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \