--- a/arch/riscv/include/asm/clint.h
+++ b/arch/riscv/include/asm/clint.h
#include <linux/smp.h>
#ifdef CONFIG_RISCV_M_MODE
-extern u32 __iomem *clint_ipi_base;
-
void clint_init_boot_cpu(void);
-
-static inline void clint_send_ipi_single(unsigned long hartid)
-{
- writel(1, clint_ipi_base + hartid);
-}
-
-static inline void clint_send_ipi_mask(const struct cpumask *mask)
-{
- int cpu;
-
- for_each_cpu(cpu, mask)
- clint_send_ipi_single(cpuid_to_hartid_map(cpu));
-}
-
-static inline void clint_clear_ipi(unsigned long hartid)
-{
- writel(0, clint_ipi_base + hartid);
-}
#else /* CONFIG_RISCV_M_MODE */
#define clint_init_boot_cpu() do { } while (0)
-
-/* stubs to for code is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
-void clint_send_ipi_single(unsigned long hartid);
-void clint_send_ipi_mask(const struct cpumask *hartid_mask);
-void clint_clear_ipi(unsigned long hartid);
#endif /* CONFIG_RISCV_M_MODE */
#endif /* _ASM_RISCV_CLINT_H */
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
struct seq_file;
extern unsigned long boot_cpu_hartid;
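+/*
+ * Platform-specific IPI operations; the active provider (CLINT or SBI)
+ * registers itself via riscv_set_ipi_ops() during boot.
+ */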
+struct riscv_ipi_ops {
+ void (*ipi_inject)(const struct cpumask *target);
+ void (*ipi_clear)(void);
+};
+
#ifdef CONFIG_SMP
/*
 * Mapping between linux logical cpu index and hartid.
 */
int riscv_hartid_to_cpuid(int hartid);
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
+/* Set custom IPI operations */
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+
+/* Clear IPI for current CPU */
+void riscv_clear_ipi(void);
+
/* Secondary hart entry */
asmlinkage void smp_callin(void);
cpumask_set_cpu(boot_cpu_hartid, out);
}
+static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+}
+
+static inline void riscv_clear_ipi(void)
+{
+}
+
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
--- a/arch/riscv/kernel/clint.c
+++ b/arch/riscv/kernel/clint.c
#include <linux/io.h>
#include <linux/of_address.h>
+#include <linux/smp.h>
#include <linux/types.h>
#include <asm/clint.h>
#include <asm/csr.h>
#include <asm/timex.h>
-#include <asm/smp.h>
/*
* This is the layout used by the SiFive clint, which is also shared by the qemu
u32 __iomem *clint_ipi_base;
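+/* Trigger an IPI by writing 1 to each target hart's MSIP register */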
+static void clint_send_ipi(const struct cpumask *target)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, target)
+ writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
+}
+
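+/* Acknowledge the local hart's IPI by writing 0 back to its MSIP register */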
+static void clint_clear_ipi(void)
+{
+ writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
+}
+
+static struct riscv_ipi_ops clint_ipi_ops = {
+ .ipi_inject = clint_send_ipi,
+ .ipi_clear = clint_clear_ipi,
+};
+
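+/* Locate the CLINT node in the devicetree, then set up the timer and IPI register bases */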
void clint_init_boot_cpu(void)
{
struct device_node *np;
riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
riscv_time_val = base + CLINT_TIME_VAL_OFF;
- clint_clear_ipi(boot_cpu_hartid);
+ clint_clear_ipi();
+ riscv_set_ipi_ops(&clint_ipi_ops);
}
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
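+/* Convert the logical CPU mask to a hartid mask and let the SBI inject the IPIs */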
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+ struct cpumask hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
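+/* No .ipi_clear callback: riscv_clear_ipi() already clears the software-interrupt pending bit */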
+static struct riscv_ipi_ops sbi_ipi_ops = {
+ .ipi_inject = sbi_send_cpumask_ipi,
+};
int __init sbi_init(void)
{
__sbi_rfence = __sbi_rfence_v01;
}
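+ /* From here on, all IPIs are injected through the SBI */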
+ riscv_set_ipi_ops(&sbi_ipi_ops);
+
return 0;
}
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
wait_for_interrupt();
}
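+/* IPI operations registered by the platform's IPI provider (CLINT or SBI) */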
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+ ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
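+/*
+ * Example (hypothetical "foo" driver, for illustration only): a custom
+ * IPI provider registers its callbacks once its hardware is probed:
+ *
+ *	static struct riscv_ipi_ops foo_ipi_ops = {
+ *		.ipi_inject = foo_send_ipi,
+ *		.ipi_clear = foo_clear_ipi,
+ *	};
+ *	riscv_set_ipi_ops(&foo_ipi_ops);
+ */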
+
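+/* Ack an IPI: run the provider's clear hook (if any), then clear the software-interrupt pending bit */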
+void riscv_clear_ipi(void)
+{
+ if (ipi_ops && ipi_ops->ipi_clear)
+ ipi_ops->ipi_clear();
+
+ csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
- struct cpumask hartid_mask;
int cpu;
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(&hartid_mask));
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(mask);
else
- clint_send_ipi_mask(mask);
+ pr_warn("SMP: IPI inject method not available\n");
}
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
- int hartid = cpuid_to_hartid_map(cpu);
-
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
- else
- clint_send_ipi_single(hartid);
-}
-
-static inline void clear_ipi(void)
-{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- csr_clear(CSR_IP, IE_SIE);
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(cpumask_of(cpu));
else
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ pr_warn("SMP: IPI inject method not available\n");
}
#ifdef CONFIG_IRQ_WORK
irq_enter();
- clear_ipi();
+ riscv_clear_ipi();
while (true) {
unsigned long ops;
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
- if (!IS_ENABLED(CONFIG_RISCV_SBI))
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ riscv_clear_ipi();
/* All kernel threads share the same mm context. */
mmgrab(mm);