Current code skips the load of the spinlock address when the cache is
disabled. The subsequent call to spin_unlock then stores into whatever
random location x0 happens to point to.
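
For illustration, spin_unlock releases the lock with a store to the
address passed in x0, roughly along these lines (a sketch, not the
exact source):

	spin_unlock:
		stlr	wzr, [x0]	/* store-release to whatever x0 points at */
		ret

With the load skipped, that store goes through whatever stale value x0
holds at the time.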
Move the spinlock address load earlier so that x0 is always valid when
spin_unlock is called.
Change-Id: Iac640289725dce2518f2fed483d7d36ca748ffe8
Signed-off-by: Baruch Siach <baruch@tkos.co.il>
 	mov	x4, x30		/* x3 and x4 are not clobbered by spin_lock() */
 	mov	x3, #0		/* return value */
+	adrp	x0, crash_console_spinlock
+	add	x0, x0, :lo12:crash_console_spinlock
+
 	mrs	x1, sctlr_el3
 	tst	x1, #SCTLR_C_BIT
 	beq	skip_spinlock	/* can't synchronize when cache disabled */
-
-	adrp	x0, crash_console_spinlock
-	add	x0, x0, :lo12:crash_console_spinlock
 	bl	spin_lock
 skip_spinlock: