blackfin: smp: adapt to generic smp helpers
author    Steven Miao <realmz6@gmail.com>
          Tue, 31 Jul 2012 09:28:10 +0000 (17:28 +0800)
committer Bob Liu <lliubbo@gmail.com>
          Fri, 7 Sep 2012 09:54:56 +0000 (17:54 +0800)
Replace the blackfin IPI message queue with the generic SMP helper functions.
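
With USE_GENERIC_SMP_HELPERS, kernel/smp.c owns the cross-call data
(func/info queuing and completion waiting), so the architecture no
longer needs its own spinlocked FIFO of ipi_message structs.  The arch
side shrinks to a per-cpu pending-bits word plus the hooks the generic
code expects.  As an illustrative sketch of that contract (all names
below are the ones this patch introduces or uses):

	/* sender side: the generic layer has already queued func/info */
	void arch_send_call_function_single_ipi(int cpu)
	{
		send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
	}

	void arch_send_call_function_ipi_mask(const struct cpumask *mask)
	{
		send_ipi(mask, BFIN_IPI_CALL_FUNC);
	}

	/* receiver side: the IPI handler dispatches back into the core */
	generic_smp_call_function_interrupt();         /* BFIN_IPI_CALL_FUNC */
	generic_smp_call_function_single_interrupt();  /* BFIN_IPI_CALL_FUNC_SINGLE */

Coalescing repeated messages of the same type into a single pending bit
is safe because the generic helpers keep the actual call payloads in
their own per-cpu queues; the IPI itself is only a doorbell.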

Signed-off-by: Steven Miao <realmz6@gmail.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>
arch/blackfin/Kconfig
arch/blackfin/include/asm/smp.h
arch/blackfin/mach-common/smp.c

diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f34861920634d15c9de2b1149aa8562a767e64f1..c7092e6057c56e4fa856362263ed4ac49a95d32e 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
        select GENERIC_ATOMIC64
        select GENERIC_IRQ_PROBE
        select IRQ_PER_CPU if SMP
+       select USE_GENERIC_SMP_HELPERS if SMP
        select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
        select GENERIC_SMP_IDLE_THREAD
        select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144b4bb5930a396f73d9bce48f67ca1c6365..9631598dcc5d120321febba77a85b6309bbdea7b 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
 #define raw_smp_processor_id()  blackfin_core_id()
 
 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe672b3b308433445d39461637f3a0d396573..a40151306b77ff301b9b152b205795a6fece4b9c 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER       0
-#define BFIN_IPI_RESCHEDULE   1
-#define BFIN_IPI_CALL_FUNC    2
-#define BFIN_IPI_CPU_STOP     3
+enum ipi_message_type {
+       BFIN_IPI_TIMER,
+       BFIN_IPI_RESCHEDULE,
+       BFIN_IPI_CALL_FUNC,
+       BFIN_IPI_CALL_FUNC_SINGLE,
+       BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
        unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-
-struct smp_call_struct {
-       void (*func)(void *info);
-       void *info;
-       int wait;
-       cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-       unsigned long type;
-       struct smp_call_struct call_struct;
-};
-
 /* A magic number - stress test shows this is safe for common cases */
 #define BFIN_IPI_MSGQ_LEN 5
 
 /* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-       spinlock_t lock;
+struct ipi_data {
        unsigned long count;
-       unsigned long head; /* head of the queue */
-       struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+       unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
        blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-       int wait;
-       void (*func)(void *info);
-       void *info;
-       func = msg->call_struct.func;
-       info = msg->call_struct.info;
-       wait = msg->call_struct.wait;
-       func(info);
-       if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-               /*
-                * 'wait' usually means synchronization between CPUs.
-                * Invalidate D cache in case shared data was changed
-                * by func() to ensure cache coherence.
-                */
-               resync_core_dcache();
-#endif
-               cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-       }
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-       struct ipi_message *msg;
-       struct ipi_message_queue *msg_queue;
+       struct ipi_data *bfin_ipi_data;
        unsigned int cpu = smp_processor_id();
-       unsigned long flags;
+       unsigned long pending;
+       unsigned long msg;
 
        platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-       msg_queue = &__get_cpu_var(ipi_msg_queue);
-
-       spin_lock_irqsave(&msg_queue->lock, flags);
-
-       while (msg_queue->count) {
-               msg = &msg_queue->ipi_message[msg_queue->head];
-               switch (msg->type) {
-               case BFIN_IPI_TIMER:
-                       ipi_timer();
-                       break;
-               case BFIN_IPI_RESCHEDULE:
-                       scheduler_ipi();
-                       break;
-               case BFIN_IPI_CALL_FUNC:
-                       ipi_call_function(cpu, msg);
-                       break;
-               case BFIN_IPI_CPU_STOP:
-                       ipi_cpu_stop(cpu);
-                       break;
-               default:
-                       printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-                              cpu, msg->type);
-                       break;
-               }
-               msg_queue->head++;
-               msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-               msg_queue->count--;
+       bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+       while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+               msg = 0;
+               do {
+                       msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+                       switch (msg) {
+                       case BFIN_IPI_TIMER:
+                               ipi_timer();
+                               break;
+                       case BFIN_IPI_RESCHEDULE:
+                               scheduler_ipi();
+                               break;
+                       case BFIN_IPI_CALL_FUNC:
+                               generic_smp_call_function_interrupt();
+                               break;
+
+                       case BFIN_IPI_CALL_FUNC_SINGLE:
+                               generic_smp_call_function_single_interrupt();
+                               break;
+
+                       case BFIN_IPI_CPU_STOP:
+                               ipi_cpu_stop(cpu);
+                               break;
+                       }
+               } while (msg < BITS_PER_LONG);
+
+               smp_mb();
        }
-       spin_unlock_irqrestore(&msg_queue->lock, flags);
        return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
        unsigned int cpu;
-       struct ipi_message_queue *msg_queue;
+       struct ipi_data *bfin_ipi_data;
        for_each_possible_cpu(cpu) {
-               msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               spin_lock_init(&msg_queue->lock);
-               msg_queue->count = 0;
-               msg_queue->head = 0;
+               bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+               bfin_ipi_data->bits = 0;
+               bfin_ipi_data->count = 0;
        }
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-                                       void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
        unsigned int cpu;
-       struct ipi_message_queue *msg_queue;
-       struct ipi_message *msg;
-       unsigned long flags, next_msg;
-       cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-       cpumask_copy(&waitmask, &callmap);
-       for_each_cpu(cpu, &callmap) {
-               msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               spin_lock_irqsave(&msg_queue->lock, flags);
-               if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-                       next_msg = (msg_queue->head + msg_queue->count)
-                                       % BFIN_IPI_MSGQ_LEN;
-                       msg = &msg_queue->ipi_message[next_msg];
-                       msg->type = type;
-                       if (type == BFIN_IPI_CALL_FUNC) {
-                               msg->call_struct.func = func;
-                               msg->call_struct.info = info;
-                               msg->call_struct.wait = wait;
-                               msg->call_struct.waitmask = &waitmask;
-                       }
-                       msg_queue->count++;
-               } else
-                       panic("IPI message queue overflow\n");
-               spin_unlock_irqrestore(&msg_queue->lock, flags);
+       struct ipi_data *bfin_ipi_data;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       for_each_cpu(cpu, cpumask) {
+               bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+               smp_mb();
+               set_bit(msg, &bfin_ipi_data->bits);
+               bfin_ipi_data->count++;
                platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
        }
 
-       if (wait) {
-               while (!cpumask_empty(&waitmask))
-                       blackfin_dcache_invalidate_range(
-                               (unsigned long)(&waitmask),
-                               (unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-               /*
-                * Invalidate D cache in case shared data was changed by
-                * other processors to ensure cache coherence.
-                */
-               resync_core_dcache();
-#endif
-       }
+       local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-       cpumask_t callmap;
-
-       preempt_disable();
-       cpumask_copy(&callmap, cpu_online_mask);
-       cpumask_clear_cpu(smp_processor_id(), &callmap);
-       if (!cpumask_empty(&callmap))
-               smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-       preempt_enable();
-
-       return 0;
+       send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-                               int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-       unsigned int cpu = cpuid;
-       cpumask_t callmap;
-
-       if (cpu_is_offline(cpu))
-               return 0;
-       cpumask_clear(&callmap);
-       cpumask_set_cpu(cpu, &callmap);
-
-       smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-       return 0;
+       send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-       cpumask_t callmap;
-       /* simply trigger an ipi */
-
-       cpumask_clear(&callmap);
-       cpumask_set_cpu(cpu, &callmap);
-
-       smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+       send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
        return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-       smp_send_message(*mask, type, NULL, NULL, 0);
+       send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
        cpumask_copy(&callmap, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &callmap);
        if (!cpumask_empty(&callmap))
-               smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+               send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
        preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        platform_prepare_cpus(max_cpus);
-       ipi_queue_init();
+       bfin_ipi_init();
        platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
        platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
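
Note on the dispatch loop in ipi_handler_int1(): msg starts at 0 and
the first lookup is find_next_bit(&pending, BITS_PER_LONG, msg + 1),
so bit 0 is never examined; with this patch's enum, BFIN_IPI_TIMER is
bit 0, and a pending timer message would be dropped.  A minimal sketch
of one possible fix (illustrative, not part of this commit) is to
reserve bit 0 with a dummy message type so that no real message lands
there, which is the shape later blackfin code upstream eventually took:

	enum ipi_message_type {
		BFIN_IPI_NONE,		/* bit 0, never sent */
		BFIN_IPI_TIMER,
		BFIN_IPI_RESCHEDULE,
		BFIN_IPI_CALL_FUNC,
		BFIN_IPI_CALL_FUNC_SINGLE,
		BFIN_IPI_CPU_STOP,
	};

Alternatively, the scan could simply start at bit 0:

	for (msg = find_first_bit(&pending, BITS_PER_LONG);
	     msg < BITS_PER_LONG;
	     msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1)) {
		switch (msg) {
		/* ... dispatch as above ... */
		}
	}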