aarch32 CPUs speculatively execute instructions following an
ERET as if it were not a jump instruction. This could lead to
cache-based side-channel vulnerabilities. The software fix is
to place barrier instructions after the ERET.
The counterpart patch for aarch64 has already been merged:
https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=f461fe346b728d0e88142fd7b8f2816415af18bc
Change-Id: I2aa3105bee0b92238f389830b3a3b8650f33af3d
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
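
Below is a minimal sketch of the pattern this patch introduces (drawn
from the hunks that follow; it is not part of the patch itself): the
bare ERET is wrapped in an exception_return assembler macro that places
barrier instructions right after it. The barriers are never
architecturally executed, since ERET jumps away, but they stop the CPU
from speculating past the exception return.

	.macro exception_return
	eret
	dsb	nsh	/* Data Synchronization Barrier, non-shareable domain */
	isb		/* Instruction Synchronization Barrier */
	.endm

Call sites then invoke exception_return in place of a bare eret, as the
hunks below show.
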
add r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
ldm r8, {r0, r1, r2, r3}
- eret
+ exception_return
endfunc bl1_aarch32_smc_handler
/* -----------------------------------------------------
add r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
ldm r8, {r0, r1, r2, r3}
- eret
+ exception_return
endfunc bl2_run_next_image
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
/*
+ * Macro for mitigating against speculative execution.
* ARMv7 cores without Virtualization extension do not support the
* eret instruction.
*/
- .macro eret
+ .macro exception_return
movs pc, lr
+ dsb nsh
+ isb
+ .endm
+
+#else
+ /*
+ * Macro for mitigating against speculative execution beyond ERET.
+ */
+ .macro exception_return
+ eret
+ dsb nsh
+ isb
.endm
#endif
/* Restore the rest of the general purpose registers */
ldm r0, {r0-r12}
- eret
+ exception_return
.endm
#endif /* SMCCC_MACROS_S */