powerpc/irq: Remove arch_local_irq_restore() for !CONFIG_CC_HAS_ASM_GOTO
author Christophe Leroy <christophe.leroy@csgroup.eu>
Mon, 16 May 2022 15:36:04 +0000 (17:36 +0200)
committer Michael Ellerman <mpe@ellerman.id.au>
Sun, 22 May 2022 05:58:28 +0000 (15:58 +1000)
All supported versions of GCC & clang support asm goto.

Remove the !CONFIG_CC_HAS_ASM_GOTO version of arch_local_irq_restore()
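
The construct in question lets inline assembly branch directly to a C label,
so the compiler can keep the common path free of an extra flag test; compilers
without it needed the C-only fallback that this change removes. Below is a
minimal sketch of an asm goto statement, kept deliberately generic: the helper
name, the tested condition and the ppc64 mnemonic are illustrative only and
are not the kernel's actual code.

    /*
     * Illustrative only: a self-contained example of the GCC/clang
     * "asm goto" construct that CONFIG_CC_HAS_ASM_GOTO used to gate.
     * The helper name and the tested condition are hypothetical.
     */
    static inline int word_is_nonzero(unsigned long word)
    {
            asm goto("cmpdi %0,0\n\t"          /* compare with 0, sets cr0 (ppc64 mnemonic) */
                     "bne %l[nonzero]"         /* branch straight to a C label */
                     : /* plain asm goto takes no outputs */
                     : "r" (word)
                     : "cc"
                     : nonzero);
            return 0;                          /* fall-through: word was zero */
    nonzero:
            return 1;
    }
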

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/58df50c9e77e2ed945bacdead30412770578886b.1652715336.git.christophe.leroy@csgroup.eu
arch/powerpc/kernel/irq.c

index 2e055e13685a14a67dd1d5d3d17937551e660f0e..ea38c13936c7e995e737ec6f7836090dbf65be77 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -217,7 +217,6 @@ static inline void replay_soft_interrupts_irqrestore(void)
 #define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
 #endif
 
-#ifdef CONFIG_CC_HAS_ASM_GOTO
 notrace void arch_local_irq_restore(unsigned long mask)
 {
        unsigned char irq_happened;
@@ -313,82 +312,6 @@ happened:
        __hard_irq_enable();
        preempt_enable();
 }
-#else
-notrace void arch_local_irq_restore(unsigned long mask)
-{
-       unsigned char irq_happened;
-
-       /* Write the new soft-enabled value */
-       irq_soft_mask_set(mask);
-       if (mask)
-               return;
-
-       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-               WARN_ON_ONCE(in_nmi() || in_hardirq());
-
-       /*
-        * From this point onward, we can take interrupts, preempt,
-        * etc... unless we got hard-disabled. We check if an event
-        * happened. If none happened, we know we can just return.
-        *
-        * We may have preempted before the check below, in which case
-        * we are checking the "new" CPU instead of the old one. This
-        * is only a problem if an event happened on the "old" CPU.
-        *
-        * External interrupt events will have caused interrupts to
-        * be hard-disabled, so there is no problem, we
-        * cannot have preempted.
-        */
-       irq_happened = get_irq_happened();
-       if (!irq_happened) {
-               if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-                       WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-               return;
-       }
-
-       /* We need to hard disable to replay. */
-       if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
-               if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-                       WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-               __hard_irq_disable();
-               local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-       } else {
-               /*
-                * We should already be hard disabled here. We had bugs
-                * where that wasn't the case so let's dbl check it and
-                * warn if we are wrong. Only do that when IRQ tracing
-                * is enabled as mfmsr() can be costly.
-                */
-               if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-                       if (WARN_ON_ONCE(mfmsr() & MSR_EE))
-                               __hard_irq_disable();
-               }
-
-               if (irq_happened == PACA_IRQ_HARD_DIS) {
-                       local_paca->irq_happened = 0;
-                       __hard_irq_enable();
-                       return;
-               }
-       }
-
-       /*
-        * Disable preempt here, so that the below preempt_enable will
-        * perform resched if required (a replayed interrupt may set
-        * need_resched).
-        */
-       preempt_disable();
-       irq_soft_mask_set(IRQS_ALL_DISABLED);
-       trace_hardirqs_off();
-
-       replay_soft_interrupts_irqrestore();
-       local_paca->irq_happened = 0;
-
-       trace_hardirqs_on();
-       irq_soft_mask_set(IRQS_ENABLED);
-       __hard_irq_enable();
-       preempt_enable();
-}
-#endif
 EXPORT_SYMBOL(arch_local_irq_restore);
 
 /*