x86/entry/32: Remove redundant irq disable code
author    Thomas Gleixner <tglx@linutronix.de>
          Thu, 21 May 2020 20:05:49 +0000 (22:05 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
          Thu, 11 Jun 2020 13:15:18 +0000 (15:15 +0200)
All exceptions/interrupts return with interrupts disabled now. No point in
doing this in ASM again.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20200521202120.221223450@linutronix.de
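
[Editorial note: a minimal C sketch of the invariant this patch relies on, not the kernel's actual exit path. Only prepare_exit_to_usermode() and the labels in the diff below come from the kernel; the helper name in the sketch is hypothetical. The point is that the common C exit work already runs with interrupts disabled before handing control back to the assembly return sequence, so repeating DISABLE_INTERRUPTS/cli in entry_32.S buys nothing.]

    /*
     * Illustrative sketch only -- not the kernel's real exit code.
     * It shows why the ASM-side cli is redundant: the C exit work
     * disables interrupts itself and can assert that state before
     * the assembly IRET sequence runs.
     */
    #include <linux/irqflags.h>
    #include <linux/lockdep.h>

    static void sketch_exit_to_usermode(void)      /* hypothetical name */
    {
            local_irq_disable();            /* IRQs off for the exit work */

            /* ... pending work: signals, rescheduling, tracing ... */

            /* Hand back to the ASM path with IRQs still disabled. */
            lockdep_assert_irqs_disabled();
    }

With that guarantee provided in C, the DISABLE_INTERRUPTS/TRACE_IRQS_OFF pairs removed in the hunks below no longer serve a purpose.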
arch/x86/entry/entry_32.S

index c8f176c88a3cce7acaa96227eaf5ac9693cae0ae..2d29f77a360122633ea153da21be9e20ba31df56 100644 (file)
 
        .section .entry.text, "ax"
 
-/*
- * We use macros for low-level operations which need to be overridden
- * for paravirtualization.  The following will never clobber any registers:
- *   INTERRUPT_RETURN (aka. "iret")
- *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
- *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
- *
- * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
- * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
- * Allowing a register to be clobbered can shrink the paravirt replacement
- * enough to patch inline, increasing performance.
- */
-
-#ifdef CONFIG_PREEMPTION
-# define preempt_stop(clobbers)        DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
-#else
-# define preempt_stop(clobbers)
-#endif
-
-.macro TRACE_IRQS_IRET
-#ifdef CONFIG_TRACE_IRQFLAGS
-       testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
-       jz      1f
-       TRACE_IRQS_ON
-1:
-#endif
-.endm
-
 #define PTI_SWITCH_MASK         (1 << PAGE_SHIFT)
 
 /*
@@ -881,38 +853,6 @@ SYM_CODE_START(ret_from_fork)
 SYM_CODE_END(ret_from_fork)
 .popsection
 
-/*
- * Return to user mode is not as complex as all this looks,
- * but we want the default path for a system call return to
- * go as quickly as possible which is why some of this is
- * less clear than it otherwise should be.
- */
-
-       # userspace resumption stub bypassing syscall exit tracing
-SYM_CODE_START_LOCAL(ret_from_exception)
-       preempt_stop(CLBR_ANY)
-ret_from_intr:
-#ifdef CONFIG_VM86
-       movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS and CS
-       movb    PT_CS(%esp), %al
-       andl    $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
-#else
-       /*
-        * We can be coming here from child spawned by kernel_thread().
-        */
-       movl    PT_CS(%esp), %eax
-       andl    $SEGMENT_RPL_MASK, %eax
-#endif
-       cmpl    $USER_RPL, %eax
-       jb      restore_all_kernel              # not returning to v8086 or userspace
-
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       TRACE_IRQS_OFF
-       movl    %esp, %eax
-       call    prepare_exit_to_usermode
-       jmp     restore_all_switch_stack
-SYM_CODE_END(ret_from_exception)
-
 SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 /*
  * All code from here through __end_SYSENTER_singlestep_region is subject
@@ -1147,22 +1087,6 @@ restore_all_switch_stack:
         */
        INTERRUPT_RETURN
 
-restore_all_kernel:
-#ifdef CONFIG_PREEMPTION
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       cmpl    $0, PER_CPU_VAR(__preempt_count)
-       jnz     .Lno_preempt
-       testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
-       jz      .Lno_preempt
-       call    preempt_schedule_irq
-.Lno_preempt:
-#endif
-       TRACE_IRQS_IRET
-       PARANOID_EXIT_TO_KERNEL_MODE
-       BUG_IF_WRONG_CR3
-       RESTORE_REGS 4
-       jmp     .Lirq_return
-
 .section .fixup, "ax"
 SYM_CODE_START(asm_iret_error)
        pushl   $0                              # no error code