From 0b3a2cf0226878ad7098cc6cd1a97ade74fd9c38 Mon Sep 17 00:00:00 2001
From: Jay Buddhabhatti
Date: Thu, 2 Mar 2023 02:47:36 -0800
Subject: [PATCH] fix(versal-net): use spin_lock instead of bakery_lock

From Arm v8.2 onwards the caches are turned off automatically when a
CPU powers down, so a spin_lock can be used instead of a bakery_lock
on platforms where HW_ASSISTED_COHERENCY is enabled. Versal NET
enables HW_ASSISTED_COHERENCY and therefore now uses a spin lock,
while ZynqMP and Versal do not enable it and keep using a bakery_lock.

Also remove the bakery_lock_init() call, since that function is empty.

Signed-off-by: Jay Buddhabhatti
Change-Id: I18ff939b51f16d7d3484d8564d6ee6c586f363d8
---
 plat/xilinx/common/pm_service/pm_ipi.c        | 40 +++++++++++++++----
 plat/xilinx/versal_net/pm_service/pm_client.c | 40 ++++++++++++++++---
 2 files changed, 67 insertions(+), 13 deletions(-)

diff --git a/plat/xilinx/common/pm_service/pm_ipi.c b/plat/xilinx/common/pm_service/pm_ipi.c
index 37d038423..b19fc108a 100644
--- a/plat/xilinx/common/pm_service/pm_ipi.c
+++ b/plat/xilinx/common/pm_service/pm_ipi.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <lib/spinlock.h>
 #include
 #include
 #include
@@ -21,7 +22,33 @@
 #define ERROR_CODE_MASK		(0xFFFFU)
 #define PM_OFFSET		(0U)
 
+/*
+ * On Armv8.2 and later the caches are turned off automatically when a
+ * CPU powers down, so it is safe to use a plain spin_lock here.
+ */
+#if !HW_ASSISTED_COHERENCY
 DEFINE_BAKERY_LOCK(pm_secure_lock);
+static inline void pm_ipi_lock_get(void)
+{
+	bakery_lock_get(&pm_secure_lock);
+}
+
+static inline void pm_ipi_lock_release(void)
+{
+	bakery_lock_release(&pm_secure_lock);
+}
+#else
+spinlock_t pm_secure_lock;
+static inline void pm_ipi_lock_get(void)
+{
+	spin_lock(&pm_secure_lock);
+}
+
+static inline void pm_ipi_lock_release(void)
+{
+	spin_unlock(&pm_secure_lock);
+}
+#endif
 
 /**
  * pm_ipi_init() - Initialize IPI peripheral for communication with
@@ -36,7 +63,6 @@ DEFINE_BAKERY_LOCK(pm_secure_lock);
  */
 void pm_ipi_init(const struct pm_proc *proc)
 {
-	bakery_lock_init(&pm_secure_lock);
 	ipi_mb_open(proc->ipi->local_ipi_id, proc->ipi->remote_ipi_id);
 }
 
@@ -90,11 +116,11 @@ enum pm_ret_status pm_ipi_send_non_blocking(const struct pm_proc *proc,
 {
 	enum pm_ret_status ret;
 
-	bakery_lock_get(&pm_secure_lock);
+	pm_ipi_lock_get();
 
 	ret = pm_ipi_send_common(proc, payload, IPI_NON_BLOCKING);
 
-	bakery_lock_release(&pm_secure_lock);
+	pm_ipi_lock_release();
 
 	return ret;
 }
@@ -113,11 +139,11 @@ enum pm_ret_status pm_ipi_send(const struct pm_proc *proc,
 {
 	enum pm_ret_status ret;
 
-	bakery_lock_get(&pm_secure_lock);
+	pm_ipi_lock_get();
 
 	ret = pm_ipi_send_common(proc, payload, IPI_BLOCKING);
 
-	bakery_lock_release(&pm_secure_lock);
+	pm_ipi_lock_release();
 
 	return ret;
 }
@@ -249,7 +275,7 @@ enum pm_ret_status pm_ipi_send_sync(const struct pm_proc *proc,
 {
 	enum pm_ret_status ret;
 
-	bakery_lock_get(&pm_secure_lock);
+	pm_ipi_lock_get();
 
 	ret = pm_ipi_send_common(proc, payload, IPI_BLOCKING);
 	if (ret != PM_RET_SUCCESS) {
@@ -259,7 +285,7 @@ enum pm_ret_status pm_ipi_send_sync(const struct pm_proc *proc,
 	ret = ERROR_CODE_MASK & (pm_ipi_buff_read(proc, value, count));
 
 unlock:
-	bakery_lock_release(&pm_secure_lock);
+	pm_ipi_lock_release();
 
 	return ret;
 }
diff --git a/plat/xilinx/versal_net/pm_service/pm_client.c b/plat/xilinx/versal_net/pm_service/pm_client.c
index f543193e4..328100b46 100644
--- a/plat/xilinx/versal_net/pm_service/pm_client.c
+++ b/plat/xilinx/versal_net/pm_service/pm_client.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <lib/spinlock.h>
 #include
 #include
 #include
@@ -29,7 +30,34 @@
 #define UNDEFINED_CPUID		(~0)
 
 DEFINE_RENAME_SYSREG_RW_FUNCS(cpu_pwrctrl_val, S3_0_C15_C2_7)
+
+/*
+ * On Armv8.2 and later the caches are turned off automatically when a
+ * CPU powers down, so it is safe to use a plain spin_lock here.
+ */
+#if !HW_ASSISTED_COHERENCY
 DEFINE_BAKERY_LOCK(pm_client_secure_lock);
+static inline void pm_client_lock_get(void)
+{
+	bakery_lock_get(&pm_client_secure_lock);
+}
+
+static inline void pm_client_lock_release(void)
+{
+	bakery_lock_release(&pm_client_secure_lock);
+}
+#else
+spinlock_t pm_client_secure_lock;
+static inline void pm_client_lock_get(void)
+{
+	spin_lock(&pm_client_secure_lock);
+}
+
+static inline void pm_client_lock_release(void)
+{
+	spin_unlock(&pm_client_secure_lock);
+}
+#endif
 
 static const struct pm_ipi apu_ipi = {
 	.local_ipi_id = IPI_ID_APU,
@@ -154,7 +182,7 @@ void pm_client_suspend(const struct pm_proc *proc, uint32_t state)
 	uint32_t cpu_id = plat_my_core_pos();
 	uintptr_t val;
 
-	bakery_lock_get(&pm_client_secure_lock);
+	pm_client_lock_get();
 
 	/* TODO: Set wakeup source */
 
@@ -177,7 +205,7 @@ void pm_client_suspend(const struct pm_proc *proc, uint32_t state)
 	mmio_write_32(APU_PCIL_CORE_X_IEN_WAKE_REG(cpu_id),
 		      APU_PCIL_CORE_X_IEN_WAKE_MASK);
 
-	bakery_lock_release(&pm_client_secure_lock);
+	pm_client_lock_release();
 }
 
 /**
@@ -213,7 +241,7 @@ void pm_client_wakeup(const struct pm_proc *proc)
 		return;
 	}
 
-	bakery_lock_get(&pm_client_secure_lock);
+	pm_client_lock_get();
 
 	/* Clear powerdown request */
 	val = read_cpu_pwrctrl_val();
@@ -232,7 +260,7 @@ void pm_client_wakeup(const struct pm_proc *proc)
 	mmio_write_32(APU_PCIL_CORE_X_IDS_WAKE_REG(cpuid),
 		      APU_PCIL_CORE_X_IDS_WAKE_MASK);
 
-	bakery_lock_release(&pm_client_secure_lock);
+	pm_client_lock_release();
 }
 
 /**
@@ -249,7 +277,7 @@ void pm_client_abort_suspend(void)
 	/* Enable interrupts at processor level (for current cpu) */
 	gicv3_cpuif_enable(plat_my_core_pos());
 
-	bakery_lock_get(&pm_client_secure_lock);
+	pm_client_lock_get();
 
 	/* Clear powerdown request */
 	val = read_cpu_pwrctrl_val();
@@ -262,5 +290,5 @@ void pm_client_abort_suspend(void)
 	mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpu_id),
 		      APU_PCIL_CORE_X_IDS_POWER_MASK);
 
-	bakery_lock_release(&pm_client_secure_lock);
+	pm_client_lock_release();
 }
-- 
2.39.5
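
Note (editorial, not part of the patch itself): both files introduce the same
build-time lock selection, so here is a minimal, condensed sketch of the
pattern for review. It assumes TF-A's lib/bakery_lock.h and lib/spinlock.h
interfaces (DEFINE_BAKERY_LOCK, bakery_lock_get/release, spinlock_t,
spin_lock/spin_unlock); the plat_pm_lock_* names and the example caller are
hypothetical and do not appear in the patch.

    #include <lib/bakery_lock.h>
    #include <lib/spinlock.h>

    #if !HW_ASSISTED_COHERENCY
    /* No hardware-assisted coherency: use the bakery lock, which does not
     * rely on coherent exclusive accesses. */
    DEFINE_BAKERY_LOCK(plat_pm_lock);

    static inline void plat_pm_lock_get(void)
    {
        bakery_lock_get(&plat_pm_lock);
    }

    static inline void plat_pm_lock_release(void)
    {
        bakery_lock_release(&plat_pm_lock);
    }
    #else
    /* HW_ASSISTED_COHERENCY (Armv8.2+): caches are turned off automatically
     * at power-down, so a plain spinlock is sufficient. */
    spinlock_t plat_pm_lock;

    static inline void plat_pm_lock_get(void)
    {
        spin_lock(&plat_pm_lock);
    }

    static inline void plat_pm_lock_release(void)
    {
        spin_unlock(&plat_pm_lock);
    }
    #endif

    /* Callers look the same in both configurations. */
    void plat_pm_critical_section(void)
    {
        plat_pm_lock_get();
        /* ... send the IPI or touch shared PM state here ... */
        plat_pm_lock_release();
    }

The wrappers keep every call site identical, so the lock choice remains a
single build-time decision instead of leaking #ifdefs into each critical
section.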