void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
struct cpuinfo_arm64 *boot);
-void init_this_cpu_offset(void);
-
#endif /* __ASM_CPU_H */
bl __pi_memset
dsb ishst // Make zero page visible to PTW
- bl init_this_cpu_offset
-
#ifdef CONFIG_KASAN
bl kasan_early_init
#endif
ptrauth_keys_init_cpu x2, x3, x4, x5
#endif
- bl init_this_cpu_offset
b secondary_start_kernel
SYM_FUNC_END(__secondary_switched)
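With the early init_this_cpu_offset calls gone from head.S, the per-cpu
offset is only ever established from C code. For background: on arm64 the
offset is stashed in a thread-ID system register so that per-cpu accesses
stay cheap. A simplified sketch of the helpers involved, ignoring the VHE
alternative that uses TPIDR_EL2 and using a hypothetical name for the
read side:

static inline void set_my_cpu_offset(unsigned long off)
{
	/* Stash the per-cpu offset where later accesses can find it. */
	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
}

/* Hypothetical read-side helper; the kernel wraps this in macros. */
static inline unsigned long my_cpu_offset_read(void)
{
	unsigned long off;

	asm("mrs %0, tpidr_el1" : "=r" (off));
	return off;
}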
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
set_cpu_logical_map(0, mpidr);
+ /*
+ * Clear __my_cpu_offset on the boot CPU to avoid hangs caused by
+ * using per-cpu variables too early; for example, lockdep will
+ * access a per-cpu variable inside lock_release().
+ */
+ set_my_cpu_offset(0);
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
(unsigned long)mpidr, read_cpuid_id());
}
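Resetting the offset to zero is safe here because, until
setup_per_cpu_areas() runs, the only usable per-cpu data is the boot-time
copy linked into the kernel image in .data..percpu, and a per-cpu access
is essentially pointer arithmetic: the variable's link-time address plus
the CPU's offset. A zero offset therefore resolves every early access on
the boot CPU to that static copy. A rough illustration, with
my_this_cpu_ptr as a hypothetical stand-in for the kernel's macro
machinery:

#define my_this_cpu_ptr(ptr) \
	((typeof(ptr))((unsigned long)(ptr) + my_cpu_offset_read()))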
}
EXPORT_SYMBOL_GPL(cpu_logical_map);
-void noinstr init_this_cpu_offset(void)
-{
- unsigned int cpu = task_cpu(current);
- set_my_cpu_offset(per_cpu_offset(cpu));
-}
-
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
init_mm.start_code = (unsigned long) _text;
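The removed helper (and its prototype in asm/cpu.h, above) is simply
folded back into the two call sites that remain, below. There is little
to it: per_cpu_offset(cpu) is, in the generic asm-generic/percpu.h
flavour, just a lookup into the table that setup_per_cpu_areas()
populates. Sketch:

/* Filled in by setup_per_cpu_areas() once the runtime areas exist. */
extern unsigned long __per_cpu_offset[];

#define per_cpu_offset(x)	(__per_cpu_offset[x])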
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
const struct cpu_operations *ops;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = task_cpu(current);
+ set_my_cpu_offset(per_cpu_offset(cpu));
/*
* All kernel threads share the same mm context; grab a
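Note why secondary_start_kernel() uses task_cpu(current) rather than
smp_processor_id(): this code runs before the new CPU's offset has been
programmed, and on arm64 smp_processor_id() itself reads a per-cpu
variable, so the CPU number has to come from the idle task instead; the
booting CPU recorded it there when it set the idle task up. Roughly,
assuming CONFIG_THREAD_INFO_IN_TASK:

static inline unsigned int task_cpu(const struct task_struct *p)
{
	/* thread_info lives in task_struct; no per-cpu access needed. */
	return READ_ONCE(task_thread_info(p)->cpu);
}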
void __init smp_prepare_boot_cpu(void)
{
- /*
- * Now that setup_per_cpu_areas() has allocated the runtime per-cpu
- * areas it is only safe to read the CPU0 boot-time area, and we must
- * reinitialize the offset to point to the runtime area.
- */
- init_this_cpu_offset();
-
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
/*
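The boot CPU's switch from the zero offset to its runtime area relies on
the ordering in start_kernel(), which calls setup_per_cpu_areas() before
the arch's smp_prepare_boot_cpu() hook. Abridged from init/main.c:

asmlinkage __visible void __init start_kernel(void)
{
	...
	setup_arch(&command_line);	/* arm64 boot CPU still runs with offset 0 */
	...
	setup_per_cpu_areas();		/* allocate the runtime per-cpu areas */
	smp_prepare_boot_cpu();		/* arch hook: switch CPU0 to its runtime area */
	...
}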