git.baikalelectronics.ru Git - kernel.git/commitdiff
arm64: kexec: load from kimage prior to clobbering
authorMark Rutland <mark.rutland@arm.com>
Mon, 16 May 2022 16:07:35 +0000 (17:07 +0100)
committerWill Deacon <will@kernel.org>
Tue, 17 May 2022 13:25:35 +0000 (14:25 +0100)
In arm64_relocate_new_kernel() we load some fields out of the kimage
structure after relocation has occurred. As the kimage structure isn't
allocated to be relocation-safe, it may be clobbered during relocation,
and we may load junk values out of the structure.

Due to this, kexec may fail when the kimage allocation happens to fall
within a PA range that an object will be relocated to. This has been
observed to occur for regular kexec on a QEMU TCG 'virt' machine with
2GiB of RAM, where the PA range of the new kernel image overlaps the
kimage structure.

Avoid this by ensuring we load all values from the kimage structure
prior to relocation.

I've tested this atop v5.16 and v5.18-rc6.

Fixes: 878fdbd70486 ("arm64: kexec: pass kimage as the only argument to relocation function")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Link: https://lore.kernel.org/r/20220516160735.731404-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kernel/relocate_kernel.S

index f0a3df9e18a32158fed3f3a49ec7e67733cd7407..413f899e4ac639414dcfaf2e5f0a7152fc55b938 100644 (file)
  * safe memory that has been set up to be preserved during the copy operation.
  */
 SYM_CODE_START(arm64_relocate_new_kernel)
+       /*
+        * The kimage structure isn't allocated specially and may be clobbered
+        * during relocation. We must load any values we need from it prior to
+        * any relocation occurring.
+        */
+       ldr     x28, [x0, #KIMAGE_START]
+       ldr     x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
+       ldr     x26, [x0, #KIMAGE_ARCH_DTB_MEM]
+
        /* Setup the list loop variables. */
        ldr     x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
        ldr     x17, [x0, #KIMAGE_ARCH_TTBR1]   /* x17 = linear map copy */
@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
        ic      iallu
        dsb     nsh
        isb
-       ldr     x4, [x0, #KIMAGE_START]                 /* relocation start */
-       ldr     x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]      /* relocation start */
-       ldr     x0, [x0, #KIMAGE_ARCH_DTB_MEM]          /* dtb address */
        turn_off_mmu x12, x13
 
        /* Start new image. */
-       cbz     x1, .Lel1
-       mov     x1, x4                          /* relocation start */
-       mov     x2, x0                          /* dtb address */
+       cbz     x27, .Lel1
+       mov     x1, x28                         /* kernel entry point */
+       mov     x2, x26                         /* dtb address */
        mov     x3, xzr
        mov     x4, xzr
        mov     x0, #HVC_SOFT_RESTART
        hvc     #0                              /* Jumps from el2 */
 .Lel1:
+       mov     x0, x26                         /* dtb address */
+       mov     x1, xzr
        mov     x2, xzr
        mov     x3, xzr
-       br      x4                              /* Jumps from el1 */
+       br      x28                             /* Jumps from el1 */
 SYM_CODE_END(arm64_relocate_new_kernel)