The AMU extension code was using its own feature detection routines.
Replace them with the generic CPU feature handlers (defined in
arch_features.h), which are extended to cover the v1p1 variant as well.
Change-Id: I8540f1e745d7b02a25a6c6cdf2a39d6f5e21f2aa
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
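
Note: the generic path follows the tri-state convention of the
ENABLE_FEAT_* build options: 0 disables a feature outright, 1 assumes
it is always present, and 2 probes the ID registers at runtime. Below
is a minimal sketch of the check_feature() handler this conversion
relies on, assuming the FEAT_STATE_* values shown and TF-A's debug
helpers; it is an illustration, not a verbatim copy of
common/feat_detect.c:

    #include <common/debug.h>       /* ERROR(), WARN(), panic() */

    #define FEAT_STATE_DISABLED     0
    #define FEAT_STATE_ALWAYS       1
    #define FEAT_STATE_CHECK        2

    /*
     * Panic if a feature the build promises (state == FEAT_STATE_ALWAYS)
     * reports an ID field below `min`; warn when the field exceeds the
     * highest version (`max`) this code knows about.
     */
    static void check_feature(int state, unsigned long field,
                              const char *feat_name, unsigned int min,
                              unsigned int max)
    {
            if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
                    ERROR("FEAT_%s not supported by the PE\n", feat_name);
                    panic();
            }
            if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
                    WARN("FEAT_%s is v%lu, only v%u is known\n",
                         feat_name, field, max);
            }
    }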
#endif
}
-/***********************************************
- * Feature : FEAT_AMUv1p1 (AMU Extensions v1.1)
- **********************************************/
-static void read_feat_amuv1p1(void)
-{
-#if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS)
- feat_detect_panic(is_armv8_6_feat_amuv1p1_present(), "AMUv1p1");
-#endif
-}
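
For context: the AMU field in ID_AA64PFR0_EL1 (bits [47:44]; ID_PFR0
bits [23:20] on AArch32) encodes the feature version directly, which is
what lets a bounded range check replace the bespoke detector removed
above. A sketch of the encoding behind the arch.h constants, per the
Arm ARM:

    #define ID_AA64PFR0_AMU_NOT_SUPPORTED   0U
    #define ID_AA64PFR0_AMU_V1              1U      /* FEAT_AMUv1 */
    #define ID_AA64PFR0_AMU_V1P1            2U      /* FEAT_AMUv1p1 */

Hence the "AMUv1p1", 2, 2 arguments in the replacement below: version 2
is both the minimum required when the build promises v1p1 and the
highest version this code knows about.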
-
/**************************************************
* Feature : FEAT_RME (Realm Management Extension)
*************************************************/
read_feat_rng_trap();
/* v8.6 features */
- read_feat_amuv1p1();
+ check_feature(ENABLE_FEAT_AMUv1p1, read_feat_amu_id_field(),
+ "AMUv1p1", 2, 2);
check_feature(ENABLE_FEAT_FGT, read_feat_fgt_id_field(), "FGT", 1, 1);
check_feature(ENABLE_FEAT_ECV, read_feat_ecv_id_field(), "ECV", 1, 2);
check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
return ISOLATE_FIELD(read_id_mmfr4(), ID_MMFR4_CNP) != 0U;
}
+static inline unsigned int read_feat_amu_id_field(void)
+{
+ return ISOLATE_FIELD(read_id_pfr0(), ID_PFR0_AMU);
+}
+
+static inline bool is_feat_amu_supported(void)
+{
+ if (ENABLE_FEAT_AMU == FEAT_STATE_DISABLED) {
+ return false;
+ }
+
+ if (ENABLE_FEAT_AMU == FEAT_STATE_ALWAYS) {
+ return true;
+ }
+
+ return read_feat_amu_id_field() >= ID_PFR0_AMU_V1;
+}
+
+static inline bool is_feat_amuv1p1_supported(void)
+{
+ if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_DISABLED) {
+ return false;
+ }
+
+ if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS) {
+ return true;
+ }
+
+ return read_feat_amu_id_field() >= ID_PFR0_AMU_V1P1;
+}
+
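
The new helpers extract the field with the header's ISOLATE_FIELD()
macro, which shifts and masks an ID register by the corresponding
<NAME>_SHIFT/<NAME>_MASK pair from arch.h; roughly (an assumed
reconstruction, not copied verbatim from arch_features.h):

    #define ISOLATE_FIELD(reg, feat) \
            ((unsigned int)(((reg) >> (feat ## _SHIFT)) & (feat ## _MASK)))

    /*
     * e.g. ISOLATE_FIELD(read_id_pfr0(), ID_PFR0_AMU) shifts ID_PFR0
     * right by ID_PFR0_AMU_SHIFT and masks with ID_PFR0_AMU_MASK,
     * yielding 0 (no AMU), 1 (AMUv1) or 2 (AMUv1p1).
     */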
static inline unsigned int read_feat_trf_id_field(void)
{
return ISOLATE_FIELD(read_id_dfr0(), ID_DFR0_TRACEFILT);
return read_feat_amu_id_field() >= ID_AA64PFR0_AMU_V1;
}
-static inline bool is_armv8_6_feat_amuv1p1_present(void)
+static inline bool is_feat_amuv1p1_supported(void)
{
+ if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_DISABLED) {
+ return false;
+ }
+
+ if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS) {
+ return true;
+ }
+
return read_feat_amu_id_field() >= ID_AA64PFR0_AMU_V1P1;
}
#include <platform_def.h>
+#if ENABLE_FEAT_AMU
#if __aarch64__
void amu_enable(bool el2_unused, cpu_context_t *ctx);
#else
void amu_enable(bool el2_unused);
#endif
+#else
+#if __aarch64__
+static inline void amu_enable(bool el2_unused, cpu_context_t *ctx)
+{
+}
+#else
+static inline void amu_enable(bool el2_unused)
+{
+}
+#endif
+#endif
#if ENABLE_AMU_AUXILIARY_COUNTERS
/*
static void enable_extensions_nonsecure(bool el2_unused)
{
#if IMAGE_BL32
-#if ENABLE_FEAT_AMU
- amu_enable(el2_unused);
-#endif
+ if (is_feat_amu_supported()) {
+ amu_enable(el2_unused);
+ }
if (is_feat_sys_reg_trace_supported()) {
sys_reg_trace_enable();
spe_enable(el2_unused);
}
-#if ENABLE_FEAT_AMU
- amu_enable(el2_unused, ctx);
-#endif
+ if (is_feat_amu_supported()) {
+ amu_enable(el2_unused, ctx);
+ }
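
The preprocessor guards can go because of the stubs added to amu.h
above: with ENABLE_FEAT_AMU at 0, amu_enable() is an empty static
inline and is_feat_amu_supported() folds to a compile-time false, so
the compiler eliminates the call entirely. A standalone sketch of the
idiom, using generic foo_* names rather than TF-A's:

    #include <stdbool.h>

    #ifndef ENABLE_FEAT_FOO
    #define ENABLE_FEAT_FOO 0       /* 0: off, 1: always, 2: probe */
    #endif

    static inline unsigned int read_foo_id_field(void)
    {
            return 0U;              /* stand-in for an ID register read */
    }

    #if ENABLE_FEAT_FOO
    void foo_enable(void);                  /* real code, linked in */
    #else
    static inline void foo_enable(void)     /* no-op stub */
    {
    }
    #endif

    static inline bool is_feat_foo_supported(void)
    {
            if (ENABLE_FEAT_FOO == 0) {
                    return false;   /* caller's if () folds away */
            }
            if (ENABLE_FEAT_FOO == 1) {
                    return true;
            }
            return read_foo_id_field() >= 1U;
    }

    void setup(void)
    {
            if (is_feat_foo_supported()) {  /* no #if at the call site */
                    foo_enable();
            }
    }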
#if ENABLE_SME_FOR_NS
/* Enable SME, SVE, and FPU/SIMD for non-secure world. */
#include "../amu_private.h"
#include <arch.h>
+#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif
-static inline __unused uint32_t read_id_pfr0_amu(void)
-{
- return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
- ID_PFR0_AMU_MASK;
-}
-
static inline __unused void write_hcptr_tam(uint32_t value)
{
write_hcptr((read_hcptr() & ~TAM_BIT) |
write_amcntenclr1(value);
}
-static __unused bool amu_supported(void)
-{
- return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
-}
-
#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
*/
void amu_enable(bool el2_unused)
{
- uint32_t id_pfr0_amu; /* AMU version */
-
uint32_t amcfgr_ncg; /* Number of counter groups */
uint32_t amcgcr_cg0nc; /* Number of group 0 counters */
uint32_t amcntenset0_px = 0x0; /* Group 0 enable mask */
uint32_t amcntenset1_px = 0x0; /* Group 1 enable mask */
- id_pfr0_amu = read_id_pfr0_amu();
- if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
- /*
- * If the AMU is unsupported, nothing needs to be done.
- */
-
- return;
- }
-
if (el2_unused) {
/*
* HCPTR.TAM: Set to zero so any accesses to the Activity
#endif
}
- /* Initialize FEAT_AMUv1p1 features if present. */
- if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
+ /* Bail out if FEAT_AMUv1p1 features are not present. */
+ if (!is_feat_amuv1p1_supported()) {
return;
}
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(idx < read_amcgcr_cg0nc());
return amu_group0_cnt_read_internal(idx);
/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(idx < read_amcgcr_cg0nc());
amu_group0_cnt_write_internal(idx, val);
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(amu_group1_supported());
assert(idx < read_amcgcr_cg1nc());
/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(amu_group1_supported());
assert(idx < read_amcgcr_cg1nc());
unsigned int core_pos;
struct amu_ctx *ctx;
- uint32_t id_pfr0_amu; /* AMU version */
uint32_t amcgcr_cg0nc; /* Number of group 0 counters */
#if ENABLE_AMU_AUXILIARY_COUNTERS
uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
#endif
- id_pfr0_amu = read_id_pfr0_amu();
- if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+ if (!is_feat_amu_supported()) {
return (void *)0;
}
unsigned int core_pos;
struct amu_ctx *ctx;
- uint32_t id_pfr0_amu; /* AMU version */
-
uint32_t amcfgr_ncg; /* Number of counter groups */
uint32_t amcgcr_cg0nc; /* Number of group 0 counters */
uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
#endif
- id_pfr0_amu = read_id_pfr0_amu();
- if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+ if (!is_feat_amu_supported()) {
return (void *)0;
}
amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif
-static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
-{
- return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
- ID_AA64PFR0_AMU_MASK;
-}
-
static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
write_amcntenclr1_el0(value);
}
-static __unused bool amu_supported(void)
-{
- return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
-}
-
-static __unused bool amu_v1p1_supported(void)
-{
- return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
-}
-
#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
*/
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
- uint64_t id_aa64pfr0_el1_amu; /* AMU version */
-
uint64_t amcfgr_el0_ncg; /* Number of counter groups */
uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
uint64_t amcntenset0_el0_px = 0x0; /* Group 0 enable mask */
uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
- id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
- if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
- /*
- * If the AMU is unsupported, nothing needs to be done.
- */
-
- return;
- }
-
if (el2_unused) {
/*
* CPTR_EL2.TAM: Set to zero so any accesses to the Activity
}
/* Initialize FEAT_AMUv1p1 features if present. */
- if (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) {
+ if (is_feat_amuv1p1_supported()) {
if (el2_unused) {
/*
* Make sure virtual offsets are disabled if EL2 not
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(idx < read_amcgcr_el0_cg0nc());
return amu_group0_cnt_read_internal(idx);
/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(idx < read_amcgcr_el0_cg0nc());
amu_group0_cnt_write_internal(idx, val);
*/
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
- assert(amu_v1p1_supported());
+ assert(is_feat_amuv1p1_supported());
assert(idx < read_amcgcr_el0_cg0nc());
assert(idx != 1U);
*/
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
- assert(amu_v1p1_supported());
+ assert(is_feat_amuv1p1_supported());
assert(idx < read_amcgcr_el0_cg0nc());
assert(idx != 1U);
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(amu_group1_supported());
assert(idx < read_amcgcr_el0_cg1nc());
/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(is_feat_amu_supported());
assert(amu_group1_supported());
assert(idx < read_amcgcr_el0_cg1nc());
*/
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
- assert(amu_v1p1_supported());
+ assert(is_feat_amuv1p1_supported());
assert(amu_group1_supported());
assert(idx < read_amcgcr_el0_cg1nc());
assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
*/
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
- assert(amu_v1p1_supported());
+ assert(is_feat_amuv1p1_supported());
assert(amu_group1_supported());
assert(idx < read_amcgcr_el0_cg1nc());
assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
unsigned int core_pos;
struct amu_ctx *ctx;
- uint64_t id_aa64pfr0_el1_amu; /* AMU version */
- uint64_t hcr_el2_amvoffen; /* AMU virtual offsets enabled */
+ uint64_t hcr_el2_amvoffen = 0U; /* AMU virtual offsets enabled */
uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
#if ENABLE_AMU_AUXILIARY_COUNTERS
uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */
#endif
- id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
- if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+ if (!is_feat_amu_supported()) {
return (void *)0;
}
ctx = &amu_ctxs_[core_pos];
amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
- hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
- read_hcr_el2_amvoffen() : 0U;
+ if (is_feat_amuv1p1_supported()) {
+ hcr_el2_amvoffen = read_hcr_el2_amvoffen();
+ }
#if ENABLE_AMU_AUXILIARY_COUNTERS
amcfgr_el0_ncg = read_amcfgr_el0_ncg();
unsigned int core_pos;
struct amu_ctx *ctx;
- uint64_t id_aa64pfr0_el1_amu; /* AMU version */
-
- uint64_t hcr_el2_amvoffen; /* AMU virtual offsets enabled */
+ uint64_t hcr_el2_amvoffen = 0U; /* AMU virtual offsets enabled */
uint64_t amcfgr_el0_ncg; /* Number of counter groups */
uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */
#endif
- id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
- if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+ if (!is_feat_amu_supported()) {
return (void *)0;
}
amcfgr_el0_ncg = read_amcfgr_el0_ncg();
amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
- hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
- read_hcr_el2_amvoffen() : 0U;
+ if (is_feat_amuv1p1_supported()) {
+ hcr_el2_amvoffen = read_hcr_el2_amvoffen();
+ }
#if ENABLE_AMU_AUXILIARY_COUNTERS
amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
# Enable Activity Monitor Unit extensions by default
ENABLE_FEAT_AMU := 2
+ENABLE_FEAT_AMUv1p1 := 2
# Enable dynamic mitigation support by default
DYNAMIC_WORKAROUND_CVE_2018_3639 := 1
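
With both options defaulting to 2, FVP builds compile the AMU code in
but only activate it once the runtime ID check passes; 0 would compile
it out, while 1 skips the probe and lets check_feature() validate the
promise at boot. A standalone demo of the tri-state, compiled with
-DENABLE_FEAT_AMUv1p1=<0|1|2>; read_amu_id_field() here stands in for
the real register read:

    #include <stdbool.h>
    #include <stdio.h>

    #ifndef ENABLE_FEAT_AMUv1p1
    #define ENABLE_FEAT_AMUv1p1 2   /* the new FVP default */
    #endif

    static inline unsigned int read_amu_id_field(void)
    {
            return 2U;      /* pretend the PE implements AMUv1p1 */
    }

    static bool is_feat_amuv1p1_supported(void)
    {
            if (ENABLE_FEAT_AMUv1p1 == 0) {
                    return false;
            }
            if (ENABLE_FEAT_AMUv1p1 == 1) {
                    return true;
            }
            return read_amu_id_field() >= 2U;   /* runtime probe */
    }

    int main(void)
    {
            printf("AMUv1p1 %ssupported\n",
                   is_feat_amuv1p1_supported() ? "" : "not ");
            return 0;
    }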