refactor(amu): use new AMU feature check routines
author	Andre Przywara <andre.przywara@arm.com>
Fri, 3 Mar 2023 10:30:06 +0000 (10:30 +0000)
committer	Manish Pandey <manish.pandey2@arm.com>
Mon, 27 Mar 2023 18:36:15 +0000 (19:36 +0100)
The AMU extension code was using its own feature detection routines.
Replace them with the generic CPU feature handlers (defined in
arch_features.h), which get updated to cover the v1p1 variant as well.

Change-Id: I8540f1e745d7b02a25a6c6cdf2a39d6f5e21f2aa
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
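
For reference, the generic handlers this patch switches to follow TF-A's tri-state
build-option convention: 0 (FEAT_STATE_DISABLED) compiles the feature out,
1 (FEAT_STATE_ALWAYS) assumes the hardware implements it, and 2 defers the decision
to the ID registers at run time. A minimal sketch of the AArch64 handler added below
(every identifier is taken from the diff; only the interpretation of the 0/1/2 states
is drawn from TF-A's wider feature-detection framework):

    static inline bool is_feat_amuv1p1_supported(void)
    {
            if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_DISABLED) {
                    /* Build option is 0: the check folds to a constant false. */
                    return false;
            }

            if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS) {
                    /* Build option is 1: trust the platform, skip the ID read. */
                    return true;
            }

            /* Build option is 2: probe ID_AA64PFR0_EL1.AMU at run time. */
            return read_feat_amu_id_field() >= ID_AA64PFR0_AMU_V1P1;
    }

Call sites can then drop their #if ENABLE_FEAT_AMU guards in favour of a plain
runtime check; in the disabled case the compiler removes the call entirely (see the
context_mgmt.c and amu.h hunks below).
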
common/feat_detect.c
include/arch/aarch32/arch_features.h
include/arch/aarch64/arch_features.h
include/lib/extensions/amu.h
lib/el3_runtime/aarch32/context_mgmt.c
lib/el3_runtime/aarch64/context_mgmt.c
lib/extensions/amu/aarch32/amu.c
lib/extensions/amu/aarch64/amu.c
plat/arm/board/fvp/platform.mk

diff --git a/common/feat_detect.c b/common/feat_detect.c
index ba8c82c2cb2703cd08a8b16b5aa476ecd298cae8..12cf1263851691b826368bd3f8b6ce968316b667 100644
@@ -112,16 +112,6 @@ static void read_feat_bti(void)
 #endif
 }
 
-/***********************************************
- * Feature : FEAT_AMUv1p1 (AMU Extensions v1.1)
- **********************************************/
-static void read_feat_amuv1p1(void)
-{
-#if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS)
-       feat_detect_panic(is_armv8_6_feat_amuv1p1_present(), "AMUv1p1");
-#endif
-}
-
 /**************************************************
  * Feature : FEAT_RME (Realm Management Extension)
  *************************************************/
@@ -205,7 +195,8 @@ void detect_arch_features(void)
        read_feat_rng_trap();
 
        /* v8.6 features */
-       read_feat_amuv1p1();
+       check_feature(ENABLE_FEAT_AMUv1p1, read_feat_amu_id_field(),
+                     "AMUv1p1", 2, 2);
        check_feature(ENABLE_FEAT_FGT, read_feat_fgt_id_field(), "FGT", 1, 1);
        check_feature(ENABLE_FEAT_ECV, read_feat_ecv_id_field(), "ECV", 1, 2);
        check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
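
The check_feature() call above replaces the dedicated read_feat_amuv1p1() panic
routine: the trailing "2, 2" arguments are the minimum and maximum ID-field values
this build understands, and an AMU field value of 2 denotes AMUv1.1
(ID_AA64PFR0_AMU_V1P1 / ID_PFR0_AMU_V1P1 in the headers below). A rough sketch of
what such a helper does follows; the exact signature, the ERROR() messages and the
'tainted' flag handling live in common/feat_detect.c and are assumptions here, not
part of this hunk:

    static bool tainted;

    /*
     * 'state' is the ENABLE_FEAT_xxx build value, 'field' the ID register
     * field read by the caller, [min, max] the feature versions this build
     * can handle.
     */
    static void check_feature(int state, unsigned int field, const char *name,
                              unsigned int min, unsigned int max)
    {
            if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
                    /* Feature forced on at build time but missing on this PE. */
                    ERROR("FEAT_%s not supported by the PE\n", name);
                    tainted = true;
            }

            if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
                    /* Hardware reports a newer version than this build knows about. */
                    ERROR("FEAT_%s is version %u, only versions up to %u are handled\n",
                          name, field, max);
                    tainted = true;
            }
    }

Under that assumption, detect_arch_features() can report every mismatch and panic
once at the end, rather than on the first failing feature as feat_detect_panic() did.
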
diff --git a/include/arch/aarch32/arch_features.h b/include/arch/aarch32/arch_features.h
index 252b40734d672b753b80105637cc026c8d420d7d..7c25b99ac001027f10e6ada66e35b23873070e2b 100644
@@ -25,6 +25,37 @@ static inline bool is_armv8_2_ttcnp_present(void)
        return ISOLATE_FIELD(read_id_mmfr4(), ID_MMFR4_CNP) != 0U;
 }
 
+static inline unsigned int read_feat_amu_id_field(void)
+{
+       return ISOLATE_FIELD(read_id_pfr0(), ID_PFR0_AMU);
+}
+
+static inline bool is_feat_amu_supported(void)
+{
+       if (ENABLE_FEAT_AMU == FEAT_STATE_DISABLED) {
+               return false;
+       }
+
+       if (ENABLE_FEAT_AMU == FEAT_STATE_ALWAYS) {
+               return true;
+       }
+
+       return read_feat_amu_id_field() >= ID_PFR0_AMU_V1;
+}
+
+static inline bool is_feat_amuv1p1_supported(void)
+{
+       if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_DISABLED) {
+               return false;
+       }
+
+       if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS) {
+               return true;
+       }
+
+       return read_feat_amu_id_field() >= ID_PFR0_AMU_V1P1;
+}
+
 static inline unsigned int read_feat_trf_id_field(void)
 {
        return ISOLATE_FIELD(read_id_dfr0(), ID_DFR0_TRACEFILT);
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index f8278959718082f1c4825eb35d97f65d6ab1b836..d7116a7cf2400a7efca4a066026761dc602db663 100644
@@ -255,8 +255,16 @@ static inline bool is_feat_amu_supported(void)
        return read_feat_amu_id_field() >= ID_AA64PFR0_AMU_V1;
 }
 
-static inline bool is_armv8_6_feat_amuv1p1_present(void)
+static inline bool is_feat_amuv1p1_supported(void)
 {
+       if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_DISABLED) {
+               return false;
+       }
+
+       if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS) {
+               return true;
+       }
+
        return read_feat_amu_id_field() >= ID_AA64PFR0_AMU_V1P1;
 }
 
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index 6452f7e483c665f7f7e2b6643a3e0fcf7b253e59..de476e427b3df48c1d0820ef2a9f59b2bae3f123 100644
 
 #include <platform_def.h>
 
+#if ENABLE_FEAT_AMU
 #if __aarch64__
 void amu_enable(bool el2_unused, cpu_context_t *ctx);
 #else
 void amu_enable(bool el2_unused);
 #endif
+#else
+#if __aarch64__
+static inline void amu_enable(bool el2_unused, cpu_context_t *ctx)
+{
+}
+#else
+static inline void amu_enable(bool el2_unused)
+{
+}
+#endif
+#endif
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 /*
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index f31ee5dde6d1751d89ee404c410cc5fffb0c0c89..62e30fcab8a63ae0cd1f3e4caca61563fb184d5e 100644
@@ -136,9 +136,9 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL32
-#if ENABLE_FEAT_AMU
-       amu_enable(el2_unused);
-#endif
+       if (is_feat_amu_supported()) {
+               amu_enable(el2_unused);
+       }
 
        if (is_feat_sys_reg_trace_supported()) {
                sys_reg_trace_enable();
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index bb6db9f4d540d0d80751db9241b816504cb42e4a..12f3e6d036ba962f6508e69c8758e75c174f8ffa 100644
@@ -485,9 +485,9 @@ static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
                spe_enable(el2_unused);
        }
 
-#if ENABLE_FEAT_AMU
-       amu_enable(el2_unused, ctx);
-#endif
+       if (is_feat_amu_supported()) {
+               amu_enable(el2_unused, ctx);
+       }
 
 #if ENABLE_SME_FOR_NS
        /* Enable SME, SVE, and FPU/SIMD for non-secure world. */
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index 57b11582530aa0e065f1a2ce0f6d5791e1fcf450..03186d611b6e9b0c588d9c27f7f1ce811579aaa5 100644
@@ -10,6 +10,7 @@
 
 #include "../amu_private.h"
 #include <arch.h>
+#include <arch_features.h>
 #include <arch_helpers.h>
 #include <common/debug.h>
 #include <lib/el3_runtime/pubsub_events.h>
@@ -39,12 +40,6 @@ CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTE
        amu_ctx_group1_enable_cannot_represent_all_group1_counters);
 #endif
 
-static inline __unused uint32_t read_id_pfr0_amu(void)
-{
-       return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
-               ID_PFR0_AMU_MASK;
-}
-
 static inline __unused void write_hcptr_tam(uint32_t value)
 {
        write_hcptr((read_hcptr() & ~TAM_BIT) |
@@ -129,11 +124,6 @@ static inline __unused void write_amcntenclr1_px(uint32_t px)
        write_amcntenclr1(value);
 }
 
-static __unused bool amu_supported(void)
-{
-       return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
-}
-
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 static __unused bool amu_group1_supported(void)
 {
@@ -147,23 +137,12 @@ static __unused bool amu_group1_supported(void)
  */
 void amu_enable(bool el2_unused)
 {
-       uint32_t id_pfr0_amu;           /* AMU version */
-
        uint32_t amcfgr_ncg;            /* Number of counter groups */
        uint32_t amcgcr_cg0nc;          /* Number of group 0 counters */
 
        uint32_t amcntenset0_px = 0x0;  /* Group 0 enable mask */
        uint32_t amcntenset1_px = 0x0;  /* Group 1 enable mask */
 
-       id_pfr0_amu = read_id_pfr0_amu();
-       if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
-               /*
-                * If the AMU is unsupported, nothing needs to be done.
-                */
-
-               return;
-       }
-
        if (el2_unused) {
                /*
                 * HCPTR.TAM: Set to zero so any accesses to the Activity
@@ -221,8 +200,8 @@ void amu_enable(bool el2_unused)
 #endif
        }
 
-       /* Initialize FEAT_AMUv1p1 features if present. */
-       if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
+       /* Bail out if FEAT_AMUv1p1 features are not present. */
+       if (!is_feat_amuv1p1_supported()) {
                return;
        }
 
@@ -244,7 +223,7 @@ void amu_enable(bool el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(idx < read_amcgcr_cg0nc());
 
        return amu_group0_cnt_read_internal(idx);
@@ -253,7 +232,7 @@ static uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 static void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(idx < read_amcgcr_cg0nc());
 
        amu_group0_cnt_write_internal(idx, val);
@@ -264,7 +243,7 @@ static void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx` */
 static uint64_t amu_group1_cnt_read(unsigned  int idx)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(amu_group1_supported());
        assert(idx < read_amcgcr_cg1nc());
 
@@ -274,7 +253,7 @@ static uint64_t amu_group1_cnt_read(unsigned  int idx)
 /* Write the group 1 counter identified by the given `idx` with `val` */
 static void amu_group1_cnt_write(unsigned  int idx, uint64_t val)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(amu_group1_supported());
        assert(idx < read_amcgcr_cg1nc());
 
@@ -290,7 +269,6 @@ static void *amu_context_save(const void *arg)
        unsigned int core_pos;
        struct amu_ctx *ctx;
 
-       uint32_t id_pfr0_amu;   /* AMU version */
        uint32_t amcgcr_cg0nc;  /* Number of group 0 counters */
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
@@ -298,8 +276,7 @@ static void *amu_context_save(const void *arg)
        uint32_t amcgcr_cg1nc;  /* Number of group 1 counters */
 #endif
 
-       id_pfr0_amu = read_id_pfr0_amu();
-       if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+       if (!is_feat_amu_supported()) {
                return (void *)0;
        }
 
@@ -353,8 +330,6 @@ static void *amu_context_restore(const void *arg)
        unsigned int core_pos;
        struct amu_ctx *ctx;
 
-       uint32_t id_pfr0_amu;   /* AMU version */
-
        uint32_t amcfgr_ncg;    /* Number of counter groups */
        uint32_t amcgcr_cg0nc;  /* Number of group 0 counters */
 
@@ -362,8 +337,7 @@ static void *amu_context_restore(const void *arg)
        uint32_t amcgcr_cg1nc;  /* Number of group 1 counters */
 #endif
 
-       id_pfr0_amu = read_id_pfr0_amu();
-       if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+       if (!is_feat_amu_supported()) {
                return (void *)0;
        }
 
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 72566fd1b69e64383539438e21198781037b883f..c650629cbbb1a6b91078b455ac5a66c9316b4700 100644
@@ -57,12 +57,6 @@ CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTE
        amu_ctx_group1_enable_cannot_represent_all_group1_counters);
 #endif
 
-static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
-{
-       return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
-               ID_AA64PFR0_AMU_MASK;
-}
-
 static inline __unused uint64_t read_hcr_el2_amvoffen(void)
 {
        return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
@@ -183,16 +177,6 @@ static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
        write_amcntenclr1_el0(value);
 }
 
-static __unused bool amu_supported(void)
-{
-       return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
-}
-
-static __unused bool amu_v1p1_supported(void)
-{
-       return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
-}
-
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 static __unused bool amu_group1_supported(void)
 {
@@ -206,23 +190,12 @@ static __unused bool amu_group1_supported(void)
  */
 void amu_enable(bool el2_unused, cpu_context_t *ctx)
 {
-       uint64_t id_aa64pfr0_el1_amu;           /* AMU version */
-
        uint64_t amcfgr_el0_ncg;                /* Number of counter groups */
        uint64_t amcgcr_el0_cg0nc;              /* Number of group 0 counters */
 
        uint64_t amcntenset0_el0_px = 0x0;      /* Group 0 enable mask */
        uint64_t amcntenset1_el0_px = 0x0;      /* Group 1 enable mask */
 
-       id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
-       if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
-               /*
-                * If the AMU is unsupported, nothing needs to be done.
-                */
-
-               return;
-       }
-
        if (el2_unused) {
                /*
                 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
@@ -288,7 +261,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
        }
 
        /* Initialize FEAT_AMUv1p1 features if present. */
-       if (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) {
+       if (is_feat_amuv1p1_supported()) {
                if (el2_unused) {
                        /*
                         * Make sure virtual offsets are disabled if EL2 not
@@ -327,7 +300,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
 /* Read the group 0 counter identified by the given `idx`. */
 static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(idx < read_amcgcr_el0_cg0nc());
 
        return amu_group0_cnt_read_internal(idx);
@@ -336,7 +309,7 @@ static uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 static void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(idx < read_amcgcr_el0_cg0nc());
 
        amu_group0_cnt_write_internal(idx, val);
@@ -376,7 +349,7 @@ static bool amu_group0_voffset_supported(uint64_t idx)
  */
 static uint64_t amu_group0_voffset_read(unsigned int idx)
 {
-       assert(amu_v1p1_supported());
+       assert(is_feat_amuv1p1_supported());
        assert(idx < read_amcgcr_el0_cg0nc());
        assert(idx != 1U);
 
@@ -391,7 +364,7 @@ static uint64_t amu_group0_voffset_read(unsigned int idx)
  */
 static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
 {
-       assert(amu_v1p1_supported());
+       assert(is_feat_amuv1p1_supported());
        assert(idx < read_amcgcr_el0_cg0nc());
        assert(idx != 1U);
 
@@ -403,7 +376,7 @@ static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx` */
 static uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(amu_group1_supported());
        assert(idx < read_amcgcr_el0_cg1nc());
 
@@ -413,7 +386,7 @@ static uint64_t amu_group1_cnt_read(unsigned int idx)
 /* Write the group 1 counter identified by the given `idx` with `val` */
 static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-       assert(amu_supported());
+       assert(is_feat_amu_supported());
        assert(amu_group1_supported());
        assert(idx < read_amcgcr_el0_cg1nc());
 
@@ -428,7 +401,7 @@ static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
  */
 static uint64_t amu_group1_voffset_read(unsigned int idx)
 {
-       assert(amu_v1p1_supported());
+       assert(is_feat_amuv1p1_supported());
        assert(amu_group1_supported());
        assert(idx < read_amcgcr_el0_cg1nc());
        assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
@@ -443,7 +416,7 @@ static uint64_t amu_group1_voffset_read(unsigned int idx)
  */
 static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
 {
-       assert(amu_v1p1_supported());
+       assert(is_feat_amuv1p1_supported());
        assert(amu_group1_supported());
        assert(idx < read_amcgcr_el0_cg1nc());
        assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
@@ -460,8 +433,7 @@ static void *amu_context_save(const void *arg)
        unsigned int core_pos;
        struct amu_ctx *ctx;
 
-       uint64_t id_aa64pfr0_el1_amu;   /* AMU version */
-       uint64_t hcr_el2_amvoffen;      /* AMU virtual offsets enabled */
+       uint64_t hcr_el2_amvoffen = 0;  /* AMU virtual offsets enabled */
        uint64_t amcgcr_el0_cg0nc;      /* Number of group 0 counters */
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
@@ -470,8 +442,7 @@ static void *amu_context_save(const void *arg)
        uint64_t amcgcr_el0_cg1nc;      /* Number of group 1 counters */
 #endif
 
-       id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
-       if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+       if (!is_feat_amu_supported()) {
                return (void *)0;
        }
 
@@ -479,8 +450,9 @@ static void *amu_context_save(const void *arg)
        ctx = &amu_ctxs_[core_pos];
 
        amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
-       hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
-               read_hcr_el2_amvoffen() : 0U;
+       if (is_feat_amuv1p1_supported()) {
+               hcr_el2_amvoffen = read_hcr_el2_amvoffen();
+       }
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
        amcfgr_el0_ncg = read_amcfgr_el0_ncg();
@@ -552,9 +524,7 @@ static void *amu_context_restore(const void *arg)
        unsigned int core_pos;
        struct amu_ctx *ctx;
 
-       uint64_t id_aa64pfr0_el1_amu;   /* AMU version */
-
-       uint64_t hcr_el2_amvoffen;      /* AMU virtual offsets enabled */
+       uint64_t hcr_el2_amvoffen = 0;  /* AMU virtual offsets enabled */
 
        uint64_t amcfgr_el0_ncg;        /* Number of counter groups */
        uint64_t amcgcr_el0_cg0nc;      /* Number of group 0 counters */
@@ -564,8 +534,7 @@ static void *amu_context_restore(const void *arg)
        uint64_t amcg1idr_el0_voff;     /* Auxiliary counters with virtual offsets */
 #endif
 
-       id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
-       if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+       if (!is_feat_amu_supported()) {
                return (void *)0;
        }
 
@@ -575,8 +544,9 @@ static void *amu_context_restore(const void *arg)
        amcfgr_el0_ncg = read_amcfgr_el0_ncg();
        amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
 
-       hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
-               read_hcr_el2_amvoffen() : 0U;
+       if (is_feat_amuv1p1_supported()) {
+               hcr_el2_amvoffen = read_hcr_el2_amvoffen();
+       }
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
        amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 0d254fb00fa490932dae5b284513ba7dd684c4f7..c35bf7f3639a142ca45b1d7d1acbf7557162f372 100644
@@ -320,6 +320,7 @@ endif
 
 # Enable Activity Monitor Unit extensions by default
 ENABLE_FEAT_AMU                        :=      2
+ENABLE_FEAT_AMUv1p1            :=      2
 
 # Enable dynamic mitigation support by default
 DYNAMIC_WORKAROUND_CVE_2018_3639       :=      1