#include <arch_helpers.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
+#include <lib/spinlock.h>
#include <ipi.h>
#include <plat_ipi.h>
#include <plat_private.h>
#define ERROR_CODE_MASK (0xFFFFU)
#define PM_OFFSET (0U)
+/*
+ * On ARMv8.2 cores the caches are managed by hardware across CPU power
+ * down and remain coherent (HW_ASSISTED_COHERENCY), so a plain spinlock
+ * is safe to use here.
+ */
+#if !HW_ASSISTED_COHERENCY
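+/*
+ * Without hardware-assisted coherency the lock may be taken with the
+ * data cache disabled, so the cache-independent bakery lock is used.
+ */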
DEFINE_BAKERY_LOCK(pm_secure_lock);
+static inline void pm_ipi_lock_get(void)
+{
+ bakery_lock_get(&pm_secure_lock);
+}
+
+static inline void pm_ipi_lock_release(void)
+{
+ bakery_lock_release(&pm_secure_lock);
+}
+#else
+spinlock_t pm_secure_lock;
+static inline void pm_ipi_lock_get(void)
+{
+ spin_lock(&pm_secure_lock);
+}
+
+static inline void pm_ipi_lock_release(void)
+{
+ spin_unlock(&pm_secure_lock);
+}
+#endif
/**
* pm_ipi_init() - Initialize IPI peripheral for communication with
*/
void pm_ipi_init(const struct pm_proc *proc)
{
- bakery_lock_init(&pm_secure_lock);
ipi_mb_open(proc->ipi->local_ipi_id, proc->ipi->remote_ipi_id);
}
{
enum pm_ret_status ret;
- bakery_lock_get(&pm_secure_lock);
+ pm_ipi_lock_get();
ret = pm_ipi_send_common(proc, payload, IPI_NON_BLOCKING);
- bakery_lock_release(&pm_secure_lock);
+ pm_ipi_lock_release();
return ret;
}
{
enum pm_ret_status ret;
- bakery_lock_get(&pm_secure_lock);
+ pm_ipi_lock_get();
ret = pm_ipi_send_common(proc, payload, IPI_BLOCKING);
- bakery_lock_release(&pm_secure_lock);
+ pm_ipi_lock_release();
return ret;
}
{
enum pm_ret_status ret;
- bakery_lock_get(&pm_secure_lock);
+ pm_ipi_lock_get();
ret = pm_ipi_send_common(proc, payload, IPI_BLOCKING);
if (ret != PM_RET_SUCCESS) {
goto unlock;
}
ret = ERROR_CODE_MASK & (pm_ipi_buff_read(proc, value, count));
unlock:
- bakery_lock_release(&pm_secure_lock);
+ pm_ipi_lock_release();
return ret;
}
#include <lib/mmio.h>
+#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <plat_ipi.h>
#define UNDEFINED_CPUID (~0)
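+/*
+ * Accessors for the core power control register (S3_0_C15_C2_7), which
+ * holds the per-core powerdown request bit.
+ */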
DEFINE_RENAME_SYSREG_RW_FUNCS(cpu_pwrctrl_val, S3_0_C15_C2_7)
+
+/*
+ * On ARMv8.2 cores the caches are managed by hardware across CPU power
+ * down and remain coherent (HW_ASSISTED_COHERENCY), so a plain spinlock
+ * is safe to use here.
+ */
+#if !HW_ASSISTED_COHERENCY
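+/*
+ * Without hardware-assisted coherency the lock may be taken with the
+ * data cache disabled, so the cache-independent bakery lock is used.
+ */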
DEFINE_BAKERY_LOCK(pm_client_secure_lock);
+static inline void pm_client_lock_get(void)
+{
+ bakery_lock_get(&pm_client_secure_lock);
+}
+
+static inline void pm_client_lock_release(void)
+{
+ bakery_lock_release(&pm_client_secure_lock);
+}
+#else
+spinlock_t pm_client_secure_lock;
+static inline void pm_client_lock_get(void)
+{
+ spin_lock(&pm_client_secure_lock);
+}
+
+static inline void pm_client_lock_release(void)
+{
+ spin_unlock(&pm_client_secure_lock);
+}
+#endif
static const struct pm_ipi apu_ipi = {
.local_ipi_id = IPI_ID_APU,
uint32_t cpu_id = plat_my_core_pos();
uintptr_t val;
- bakery_lock_get(&pm_client_secure_lock);
+ pm_client_lock_get();
/* TODO: Set wakeup source */
mmio_write_32(APU_PCIL_CORE_X_IEN_WAKE_REG(cpu_id),
APU_PCIL_CORE_X_IEN_WAKE_MASK);
- bakery_lock_release(&pm_client_secure_lock);
+ pm_client_lock_release();
}
/**
return;
}
- bakery_lock_get(&pm_client_secure_lock);
+ pm_client_lock_get();
/* Clear powerdown request */
val = read_cpu_pwrctrl_val();
mmio_write_32(APU_PCIL_CORE_X_IDS_WAKE_REG(cpuid),
APU_PCIL_CORE_X_IDS_WAKE_MASK);
- bakery_lock_release(&pm_client_secure_lock);
+ pm_client_lock_release();
}
/**
/* Enable interrupts at processor level (for current cpu) */
gicv3_cpuif_enable(plat_my_core_pos());
- bakery_lock_get(&pm_client_secure_lock);
+ pm_client_lock_get();
/* Clear powerdown request */
val = read_cpu_pwrctrl_val();
mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpu_id),
APU_PCIL_CORE_X_IDS_POWER_MASK);
- bakery_lock_release(&pm_client_secure_lock);
+ pm_client_lock_release();
}