ARM: report Spectre v2 status through sysfs
author     Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
           Fri, 11 Feb 2022 16:45:54 +0000 (16:45 +0000)
committer  Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
           Sat, 5 Mar 2022 10:41:22 +0000 (10:41 +0000)
As per other architectures, add support for reporting the Spectre
vulnerability status via the sysfs CPU vulnerabilities entries.
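
For example, on a system where the BPIALL-based workaround has been
installed, the new entries could read as follows (illustrative output;
the spectre_v2 string depends on which mitigation method was selected):

  $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
  Mitigation: __user pointer sanitization
  $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
  Mitigation: Branch predictor hardening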

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
arch/arm/include/asm/spectre.h [new file with mode: 0644]
arch/arm/kernel/Makefile
arch/arm/kernel/spectre.c [new file with mode: 0644]
arch/arm/mm/Kconfig
arch/arm/mm/proc-v7-bugs.c

diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
new file mode 100644
index 0000000..8a9019e
--- /dev/null
+++ b/arch/arm/include/asm/spectre.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+       SPECTRE_UNAFFECTED,
+       SPECTRE_MITIGATED,
+       SPECTRE_VULNERABLE,
+};
+
+enum {
+       __SPECTRE_V2_METHOD_BPIALL,
+       __SPECTRE_V2_METHOD_ICIALLU,
+       __SPECTRE_V2_METHOD_SMC,
+       __SPECTRE_V2_METHOD_HVC,
+};
+
+enum {
+       SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+       SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+       SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+       SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+};
+
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+
+#endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ae295a3bcfefddad015d56e79a4af37b80d66487..6ef3b535b7bf7764dd7b06816b05007516d0add0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -106,4 +106,6 @@ endif
 
 obj-$(CONFIG_HAVE_ARM_SMCCC)   += smccc-call.o
 
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
+
 extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
new file mode 100644
index 0000000..6f6dd1c
--- /dev/null
+++ b/arch/arm/kernel/spectre.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+       if (state > spectre_v2_state)
+               spectre_v2_state = state;
+       spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       const char *method;
+
+       if (spectre_v2_state == SPECTRE_UNAFFECTED)
+               return sprintf(buf, "%s\n", "Not affected");
+
+       if (spectre_v2_state != SPECTRE_MITIGATED)
+               return sprintf(buf, "%s\n", "Vulnerable");
+
+       switch (spectre_v2_methods) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               method = "Branch predictor hardening";
+               break;
+
+       case SPECTRE_V2_METHOD_ICIALLU:
+               method = "I-cache invalidation";
+               break;
+
+       case SPECTRE_V2_METHOD_SMC:
+       case SPECTRE_V2_METHOD_HVC:
+               method = "Firmware call";
+               break;
+
+       default:
+               method = "Multiple mitigations";
+               break;
+       }
+
+       return sprintf(buf, "Mitigation: %s\n", method);
+}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 58afba346729960e2101f4d26f682f58ce187398..850329ea9bf8117ac3bf2e2bb4f63ae2c0e46620 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -830,6 +830,7 @@ config CPU_BPREDICT_DISABLE
 
 config CPU_SPECTRE
        bool
+       select GENERIC_CPU_VULNERABILITIES
 
 config HARDEN_BRANCH_PREDICTOR
        bool "Harden the branch predictor against aliasing attacks" if EXPERT
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 114c05ab4dd919a26064eda1f395f1c8f4119ec1..e438e59bb63e6d2c879f34f9b154e4c5477b7825 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -6,8 +6,35 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/proc-fns.h>
+#include <asm/spectre.h>
 #include <asm/system_misc.h>
 
+#ifdef CONFIG_ARM_PSCI
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+       struct arm_smccc_res res;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+       switch ((int)res.a0) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               return SPECTRE_UNAFFECTED;
+
+       default:
+               return SPECTRE_VULNERABLE;
+       }
+}
+#else
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+       return SPECTRE_VULNERABLE;
+}
+#endif
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
 
@@ -36,13 +63,60 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
-static void cpu_v7_spectre_init(void)
+static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
        const char *spectre_v2_method = NULL;
        int cpu = smp_processor_id();
 
        if (per_cpu(harden_branch_predictor_fn, cpu))
-               return;
+               return SPECTRE_MITIGATED;
+
+       switch (method) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_bpiall;
+               spectre_v2_method = "BPIALL";
+               break;
+
+       case SPECTRE_V2_METHOD_ICIALLU:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_iciallu;
+               spectre_v2_method = "ICIALLU";
+               break;
+
+       case SPECTRE_V2_METHOD_HVC:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       call_hvc_arch_workaround_1;
+               cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+               spectre_v2_method = "hypervisor";
+               break;
+
+       case SPECTRE_V2_METHOD_SMC:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       call_smc_arch_workaround_1;
+               cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+               spectre_v2_method = "firmware";
+               break;
+       }
+
+       if (spectre_v2_method)
+               pr_info("CPU%u: Spectre v2: using %s workaround\n",
+                       smp_processor_id(), spectre_v2_method);
+
+       return SPECTRE_MITIGATED;
+}
+#else
+static unsigned int spectre_v2_install_workaround(unsigned int method)
+{
+       pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n");
+
+       return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_v2_init(void)
+{
+       unsigned int state, method = 0;
 
        switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A8:
@@ -51,68 +125,57 @@ static void cpu_v7_spectre_init(void)
        case ARM_CPU_PART_CORTEX_A17:
        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
-               per_cpu(harden_branch_predictor_fn, cpu) =
-                       harden_branch_predictor_bpiall;
-               spectre_v2_method = "BPIALL";
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_BPIALL;
                break;
 
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
-               per_cpu(harden_branch_predictor_fn, cpu) =
-                       harden_branch_predictor_iciallu;
-               spectre_v2_method = "ICIALLU";
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_ICIALLU;
                break;
 
-#ifdef CONFIG_ARM_PSCI
        case ARM_CPU_PART_BRAHMA_B53:
                /* Requires no workaround */
+               state = SPECTRE_UNAFFECTED;
                break;
+
        default:
                /* Other ARM CPUs require no workaround */
-               if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+               if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
+                       state = SPECTRE_UNAFFECTED;
                        break;
+               }
+
                fallthrough;
-               /* Cortex A57/A72 require firmware workaround */
-       case ARM_CPU_PART_CORTEX_A57:
-       case ARM_CPU_PART_CORTEX_A72: {
-               struct arm_smccc_res res;
 
-               arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                    ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if ((int)res.a0 != 0)
-                       return;
+       /* Cortex A57/A72 require firmware workaround */
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72:
+               state = spectre_v2_get_cpu_fw_mitigation_state();
+               if (state != SPECTRE_MITIGATED)
+                       break;
 
                switch (arm_smccc_1_1_get_conduit()) {
                case SMCCC_CONDUIT_HVC:
-                       per_cpu(harden_branch_predictor_fn, cpu) =
-                               call_hvc_arch_workaround_1;
-                       cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
-                       spectre_v2_method = "hypervisor";
+                       method = SPECTRE_V2_METHOD_HVC;
                        break;
 
                case SMCCC_CONDUIT_SMC:
-                       per_cpu(harden_branch_predictor_fn, cpu) =
-                               call_smc_arch_workaround_1;
-                       cpu_do_switch_mm = cpu_v7_smc_switch_mm;
-                       spectre_v2_method = "firmware";
+                       method = SPECTRE_V2_METHOD_SMC;
                        break;
 
                default:
+                       state = SPECTRE_VULNERABLE;
                        break;
                }
        }
-#endif
-       }
 
-       if (spectre_v2_method)
-               pr_info("CPU%u: Spectre v2: using %s workaround\n",
-                       smp_processor_id(), spectre_v2_method);
-}
-#else
-static void cpu_v7_spectre_init(void)
-{
+       if (state == SPECTRE_MITIGATED)
+               state = spectre_v2_install_workaround(method);
+
+       spectre_v2_update_state(state, method);
 }
-#endif
 
 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
                                                  u32 mask, const char *msg)
@@ -142,16 +205,16 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
 void cpu_v7_ca8_ibe(void)
 {
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
-               cpu_v7_spectre_init();
+               cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_ca15_ibe(void)
 {
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
-               cpu_v7_spectre_init();
+               cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_bugs_init(void)
 {
-       cpu_v7_spectre_init();
+       cpu_v7_spectre_v2_init();
 }