CTX_INCLUDE_EL2_REGS \
CTX_INCLUDE_NEVE_REGS \
DEBUG \
+ DISABLE_MTPMU \
DYN_DISABLE_AUTH \
EL3_EXCEPTION_HANDLING \
ENABLE_AMU \
CTX_INCLUDE_EL2_REGS \
CTX_INCLUDE_NEVE_REGS \
DECRYPTION_SUPPORT_${DECRYPTION_SUPPORT} \
+ DISABLE_MTPMU \
ENABLE_AMU \
ENABLE_ASSERTIONS \
ENABLE_BTI \
#
-# Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
plat/common/${ARCH}/platform_up_stack.S \
${MBEDTLS_SOURCES}
+ifeq (${DISABLE_MTPMU},1)
+BL1_SOURCES += lib/extensions/mtpmu/${ARCH}/mtpmu.S
+endif
+
ifeq (${ARCH},aarch64)
BL1_SOURCES += lib/cpus/aarch64/dsu_helpers.S \
lib/el3_runtime/aarch64/context.S
#
-# Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
lib/cpus/${ARCH}/cpu_helpers.S \
lib/cpus/errata_report.c
+ifeq (${DISABLE_MTPMU},1)
+BL2_SOURCES += lib/extensions/mtpmu/${ARCH}/mtpmu.S
+endif
+
ifeq (${ARCH},aarch64)
BL2_SOURCES += lib/cpus/aarch64/dsu_helpers.S
endif
#
-# Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
${SPMD_SOURCES} \
${SPM_SOURCES}
+ifeq (${DISABLE_MTPMU},1)
+BL31_SOURCES += lib/extensions/mtpmu/aarch64/mtpmu.S
+endif
ifeq (${ENABLE_PMF}, 1)
BL31_SOURCES += lib/pmf/pmf_main.c
#
-# Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
services/std_svc/std_svc_setup.c \
${PSCI_LIB_SOURCES}
+ifeq (${DISABLE_MTPMU},1)
+BL32_SOURCES += lib/extensions/mtpmu/aarch32/mtpmu.S
+endif
+
ifeq (${ENABLE_PMF}, 1)
BL32_SOURCES += lib/pmf/pmf_main.c
endif
of the binary image. If set to 1, then only the ELF image is built.
0 is the default.
+- ``DISABLE_MTPMU``: Boolean option to disable FEAT_MTPMU if it is implemented
+  (Armv8.6 onwards). The default value is 0, which leaves the feature untouched
+  and keeps behaviour consistent with platforms that do not implement
+  FEAT_MTPMU. For more information on FEAT_MTPMU, check the latest Arm ARM.
+
- ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
Board Boot authentication at runtime. This option is meant to be enabled only
for development platforms. ``TRUSTED_BOARD_BOOT`` flag must be set if this
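
When ``DISABLE_MTPMU=1``, the build system also exports the flag as a preprocessor symbol
(see the Makefile hunks above), so boot code can be conditionally assembled or compiled.
A minimal C sketch of that gating pattern, using a hypothetical C call site (in this patch
the real call sites are the assembly entrypoint macros shown below):

/*
 * Illustrative sketch only. DISABLE_MTPMU is defined to 0 or 1 by the build
 * system; the hypothetical hook below merely shows the #if gating pattern
 * this patch applies in the entrypoint macros.
 */
#if DISABLE_MTPMU
void mtpmu_disable(void);	/* implemented in lib/extensions/mtpmu */
#endif

static inline void early_disable_mtpmu(void)	/* hypothetical helper */
{
#if DISABLE_MTPMU
	mtpmu_disable();	/* clear the MTPME control as early as possible */
#endif
}
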
/* CSSELR definitions */
#define LEVEL_SHIFT U(1)
+/* ID_DFR1 definitions */
+#define ID_DFR1_MTPMU_SHIFT U(0)
+#define ID_DFR1_MTPMU_MASK U(0xf)
+#define ID_DFR1_MTPMU_SUPPORTED U(1)
+
/* ID_MMFR4 definitions */
#define ID_MMFR4_CNP_SHIFT U(12)
#define ID_MMFR4_CNP_LENGTH U(4)
#define ID_PFR1_GENTIMER_MASK U(0xf)
#define ID_PFR1_GIC_SHIFT U(28)
#define ID_PFR1_GIC_MASK U(0xf)
+#define ID_PFR1_SEC_SHIFT U(4)
+#define ID_PFR1_SEC_MASK U(0xf)
+#define ID_PFR1_ELx_ENABLED U(1)
/* SCTLR definitions */
#define SCTLR_RES1_DEF ((U(1) << 23) | (U(1) << 22) | (U(1) << 4) | \
+#define SDCR_MTPME_BIT (U(1) << 28)
#define SDCR_SCCD_BIT (U(1) << 23)
#define SDCR_SPME_BIT (U(1) << 17)
#define SDCR_RESET_VAL U(0x0)
/* HSCTLR definitions */
#define HSCTLR_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
#define VTTBR_BADDR_SHIFT U(0)
/* HDCR definitions */
+#define HDCR_MTPME_BIT (U(1) << 28)
#define HDCR_HLP_BIT (U(1) << 26)
#define HDCR_HPME_BIT (U(1) << 7)
#define HDCR_RESET_VAL U(0x0)
#define CTR p15, 0, c0, c0, 1
#define CNTFRQ p15, 0, c14, c0, 0
#define ID_MMFR4 p15, 0, c0, c2, 6
+#define ID_DFR1 p15, 0, c0, c3, 5
#define ID_PFR0 p15, 0, c0, c1, 0
#define ID_PFR1 p15, 0, c0, c1, 1
#define MAIR0 p15, 0, c10, c2, 0
cps #MODE32_mon
isb
+#if DISABLE_MTPMU
+ bl mtpmu_disable
+#endif
+
.if \_warm_boot_mailbox
/* -------------------------------------------------------------
* This code will be executed for both warm and cold resets.
#define ID_AA64DFR0_PMS_SHIFT U(32)
#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+/* ID_AA64DFR0_EL1.MTPMU definitions (Armv8.6 onwards) */
+#define ID_AA64DFR0_MTPMU_SHIFT U(48)
+#define ID_AA64DFR0_MTPMU_MASK ULL(0xf)
+#define ID_AA64DFR0_MTPMU_SUPPORTED ULL(1)
+
/* ID_AA64ISAR1_EL1 definitions */
#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
#define ID_AA64ISAR1_GPI_SHIFT U(28)
#define SCR_RESET_VAL SCR_RES1_BITS
/* MDCR_EL3 definitions */
+#define MDCR_MTPME_BIT (ULL(1) << 28)
#define MDCR_SCCD_BIT (ULL(1) << 23)
#define MDCR_SPME_BIT (ULL(1) << 17)
#define MDCR_SDD_BIT (ULL(1) << 16)
#define MDCR_EL3_RESET_VAL ULL(0x0)
/* MDCR_EL2 definitions */
+#define MDCR_EL2_MTPME (U(1) << 28)
#define MDCR_EL2_HLP (U(1) << 26)
#define MDCR_EL2_HCCD (U(1) << 23)
#define MDCR_EL2_TTRF (U(1) << 19)
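
The ID_AA64DFR0_EL1.MTPMU macros above describe a 4-bit field at bit 48 whose value 0b0001
means FEAT_MTPMU is implemented. A minimal C sketch of how the shift, mask and expected
value combine, mirroring the assembly mtpmu_supported routine added later in this patch
(read_id_aa64dfr0_el1() is assumed to be TF-A's usual sysreg accessor):

#include <stdbool.h>
#include <stdint.h>

#include <arch.h>		/* ID_AA64DFR0_MTPMU_* */
#include <arch_helpers.h>	/* read_id_aa64dfr0_el1(), assumed available */

/* True when ID_AA64DFR0_EL1.MTPMU == 0b0001, i.e. FEAT_MTPMU is implemented. */
static bool mtpmu_supported_sketch(void)
{
	uint64_t field = (read_id_aa64dfr0_el1() >> ID_AA64DFR0_MTPMU_SHIFT) &
			 ID_AA64DFR0_MTPMU_MASK;

	return field == ID_AA64DFR0_MTPMU_SUPPORTED;
}
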
isb
.endif /* _init_sctlr */
+#if DISABLE_MTPMU
+ bl mtpmu_disable
+#endif
+
.if \_warm_boot_mailbox
/* -------------------------------------------------------------
* This code will be executed for both warm and cold resets.
--- /dev/null
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .global mtpmu_disable
+
+/* -------------------------------------------------------------
+ * The functions in this file are called at entrypoint, before
+ * the CPU has decided whether this is a cold or a warm boot.
+ * Therefore there is no stack yet to rely on for a C function
+ * call.
+ * -------------------------------------------------------------
+ */
+
+/*
+ * bool mtpmu_supported(void)
+ *
+ * Return a boolean indicating whether FEAT_MTPMU is supported or not.
+ *
+ * Trash registers: r0.
+ */
+func mtpmu_supported
+ ldcopr r0, ID_DFR1
+ and r0, r0, #(ID_DFR1_MTPMU_MASK >> ID_DFR1_MTPMU_SHIFT)
+ cmp r0, #ID_DFR1_MTPMU_SUPPORTED
+ mov r0, #0
+ addeq r0, r0, #1
+ bx lr
+endfunc mtpmu_supported
+
+/*
+ * bool el_implemented(unsigned int el)
+ *
+ * Return a boolean indicating if the specified EL (2 or 3) is implemented.
+ *
+ * Trash registers: r0
+ */
+func el_implemented
+ cmp r0, #3
+ ldcopr r0, ID_PFR1
+ lsreq r0, r0, #ID_PFR1_SEC_SHIFT
+ lsrne r0, r0, #ID_PFR1_VIRTEXT_SHIFT
+ /*
+ * ID_PFR1_VIRTEXT_MASK and ID_PFR1_SEC_MASK have the same value,
+ * so either one can be used here.
+ */
+ and r0, r0, #ID_PFR1_VIRTEXT_MASK
+ cmp r0, #ID_PFR1_ELx_ENABLED
+ mov r0, #0
+ addeq r0, r0, #1
+ bx lr
+endfunc el_implemented
+
+/*
+ * void mtpmu_disable(void)
+ *
+ * Disable FEAT_MTPMU if it is supported.
+ *
+ * Trash registers: r0, r1, r2
+ */
+func mtpmu_disable
+ mov r2, lr
+ bl mtpmu_supported
+ cmp r0, #0
+ bxeq r2 /* FEAT_MTPMU not supported */
+
+ /* FEAT_MTPMU supported */
+ mov r0, #3
+ bl el_implemented
+ cmp r0, #0
+ beq 1f
+
+ /* EL3 implemented */
+ ldcopr r0, SDCR
+ ldr r1, =SDCR_MTPME_BIT
+ bic r0, r0, r1
+ stcopr r0, SDCR
+
+ /*
+ * If EL3 is implemented, HDCR.MTPME is implemented as Res0 and
+ * FEAT_MTPMU is controlled only from EL3, so no need to perform
+ * any operations for EL2.
+ */
+ isb
+ bx r2
+1:
+ /* EL3 not implemented */
+ mov r0, #2
+ bl el_implemented
+ cmp r0, #0
+ bxeq r2 /* No EL2 or EL3 implemented */
+
+ /* EL2 implemented */
+ ldcopr r0, HDCR
+ ldr r1, =HDCR_MTPME_BIT
+ bic r0, r0, r1 /* HDCR.MTPME = 0 disables FEAT_MTPMU */
+ stcopr r0, HDCR
+ isb
+ bx r2
+endfunc mtpmu_disable
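
A hedged C rendering of the AArch32 routine above, for readability only: the real code
must stay in assembly because no stack exists at the call site, and the accessors used
below (read_id_dfr1(), read_id_pfr1(), read_sdcr()/write_sdcr(), read_hdcr()/write_hdcr(),
isb()) follow TF-A's arch_helpers.h naming but are assumptions here; an ID_DFR1 accessor
in particular may not exist in the tree.

#include <arch.h>
#include <arch_helpers.h>	/* accessor names below are assumptions */

/* C sketch of the AArch32 mtpmu_disable flow (illustrative only). */
static void mtpmu_disable_sketch(void)
{
	/* FEAT_MTPMU implemented? (ID_DFR1.MTPMU == 0b0001) */
	if (((read_id_dfr1() >> ID_DFR1_MTPMU_SHIFT) & ID_DFR1_MTPMU_MASK) !=
	    ID_DFR1_MTPMU_SUPPORTED) {
		return;
	}

	if (((read_id_pfr1() >> ID_PFR1_SEC_SHIFT) & ID_PFR1_SEC_MASK) ==
	    ID_PFR1_ELx_ENABLED) {
		/* EL3 implemented: SDCR.MTPME = 0 disables FEAT_MTPMU. */
		write_sdcr(read_sdcr() & ~SDCR_MTPME_BIT);
	} else if (((read_id_pfr1() >> ID_PFR1_VIRTEXT_SHIFT) &
		    ID_PFR1_VIRTEXT_MASK) == ID_PFR1_ELx_ENABLED) {
		/* No EL3, EL2 implemented: HDCR.MTPME = 0 disables it. */
		write_hdcr(read_hdcr() & ~HDCR_MTPME_BIT);
	}

	isb();
}
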
--- /dev/null
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .global mtpmu_disable
+
+/* -------------------------------------------------------------
+ * The functions in this file are called at entrypoint, before
+ * the CPU has decided whether this is a cold or a warm boot.
+ * Therefore there is no stack yet to rely on for a C function
+ * call.
+ * -------------------------------------------------------------
+ */
+
+/*
+ * bool mtpmu_supported(void)
+ *
+ * Return a boolean indicating whether FEAT_MTPMU is supported or not.
+ *
+ * Trash registers: x0, x1
+ */
+func mtpmu_supported
+ mrs x0, id_aa64dfr0_el1
+ mov_imm x1, ID_AA64DFR0_MTPMU_MASK
+ and x0, x1, x0, LSR #ID_AA64DFR0_MTPMU_SHIFT
+ cmp x0, ID_AA64DFR0_MTPMU_SUPPORTED
+ cset x0, eq
+ ret
+endfunc mtpmu_supported
+
+/*
+ * bool el_implemented(unsigned int el_shift)
+ *
+ * Return a boolean indicating if the specified EL is implemented.
+ * The EL is identified by its field shift within the id_aa64pfr0_el1
+ * register (e.g. ID_AA64PFR0_EL3_SHIFT).
+ *
+ * Trash registers: x0, x1
+ */
+func el_implemented
+ mrs x1, id_aa64pfr0_el1
+ lsr x1, x1, x0
+ /* The field is non-zero when the EL is implemented */
+ ands x1, x1, #ID_AA64PFR0_ELX_MASK
+ cset x0, ne
+ ret
+endfunc el_implemented
+
+/*
+ * void mtpmu_disable(void)
+ *
+ * Disable FEAT_MTPMU if it is supported.
+ *
+ * Trash registers: x0, x1, x10, x30
+ */
+func mtpmu_disable
+ mov x10, x30
+ bl mtpmu_supported
+ cbz x0, exit_disable
+
+ /* FEAT_MTPMU supported */
+ mov_imm x0, ID_AA64PFR0_EL3_SHIFT
+ bl el_implemented
+ cbz x0, 1f
+
+ /* EL3 implemented */
+ mrs x0, mdcr_el3
+ mov_imm x1, MDCR_MTPME_BIT
+ bic x0, x0, x1
+ msr mdcr_el3, x0
+
+ /*
+ * If EL3 is implemented, MDCR_EL2.MTPME is implemented as Res0 and
+ * FEAT_MTPMU is controlled only from EL3, so no need to perform
+ * any operations for EL2.
+ */
+ isb
+exit_disable:
+ ret x10
+1:
+ /* EL3 not implemented */
+ mov_imm x0, ID_AA64PFR0_EL2_SHIFT
+ bl el_implemented
+ cbz x0, exit_disable
+
+ /* EL2 implemented */
+ mrs x0, mdcr_el2
+ mov_imm x1, MDCR_EL2_MTPME
+ bic x0, x0, x1
+ msr mdcr_el2, x0
+ isb
+ ret x10
+endfunc mtpmu_disable
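
The AArch64 counterpart, again as a hedged C sketch under the same caveats (no stack at
the real call site; the mdcr/id register accessors follow TF-A's arch_helpers.h naming and
are assumed available). A non-zero ID_AA64PFR0_EL1.ELx field is treated as "EL implemented":

#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>	/* accessor names below are assumptions */

/* C sketch of the AArch64 mtpmu_disable flow (illustrative only). */
static void mtpmu_disable_sketch(void)
{
	/* FEAT_MTPMU implemented? (ID_AA64DFR0_EL1.MTPMU == 0b0001) */
	if (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_MTPMU_SHIFT) &
	     ID_AA64DFR0_MTPMU_MASK) != ID_AA64DFR0_MTPMU_SUPPORTED) {
		return;
	}

	if (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL3_SHIFT) &
	     ID_AA64PFR0_ELX_MASK) != 0ULL) {
		/* EL3 implemented: MDCR_EL3.MTPME = 0 disables FEAT_MTPMU. */
		write_mdcr_el3(read_mdcr_el3() & ~MDCR_MTPME_BIT);
	} else if (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT) &
		    ID_AA64PFR0_ELX_MASK) != 0ULL) {
		/* No EL3, EL2 implemented: MDCR_EL2.MTPME = 0 disables it. */
		write_mdcr_el2(read_mdcr_el2() & ~(uint64_t)MDCR_EL2_MTPME);
	}

	isb();
}
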
# Disable the generation of the binary image (ELF only).
DISABLE_BIN_GENERATION := 0
+# Disable MTPMU if FEAT_MTPMU is supported. Default is 0 to keep backwards
+# compatibility.
+DISABLE_MTPMU := 0
+
# Enable capability to disable authentication dynamically. Only meant for
# development platforms.
DYN_DISABLE_AUTH := 0