        select MODULES_USE_ELF_RELA
        select NEED_DMA_MAP_STATE
        select SWIOTLB
-       select X86_DEV_DMA_OPS
        select ARCH_HAS_SYSCALL_WRAPPER
+config FORCE_DYNAMIC_FTRACE
+        def_bool y
+        depends on X86_32
+        depends on FUNCTION_TRACER
+        select DYNAMIC_FTRACE
+        help
+         We keep the static function tracing (!DYNAMIC_FTRACE) around
+         in order to test the non-static function tracing in the
+         generic code, as other architectures still use it. But we
+         only need to keep it around for x86_64; there is no need for
+         it on x86_32, so force DYNAMIC_FTRACE there.
#
# Arch settings
#
* @paranoid == 2 is special: the stub will never switch stacks. This is for
* #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
*/
- .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
++.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
hv_stimer0_callback_vector hv_stimer0_vector_handler
#endif /* CONFIG_HYPERV */
-idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
- idtentry int3 do_int3 has_error_code=0
+ idtentry int3 do_int3 has_error_code=0 create_gap=1
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN_PV
* inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
-extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
extern int after_bootmem;
+extern __ro_after_init struct mm_struct *poking_mm;
+extern __ro_after_init unsigned long poking_addr;
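
For orientation, a caller of text_poke_bp() might look roughly like the sketch
below. This is an illustrative example, not code from this patch: the function
example_repoint_call() and its parameters are made up, and it assumes the
patched site is an ordinary 5-byte relative call.

/*
 * Hypothetical sketch: retarget a 5-byte "call" at @ip so it calls
 * @new_func.  text_poke_bp() installs an int3 at @ip first, writes the
 * remaining bytes, then restores the first byte; a CPU that executes
 * the site mid-patch is diverted by poke_int3_handler() to @handler.
 */
static void example_repoint_call(void *ip, void *new_func, void *handler)
{
        unsigned char insn[CALL_INSN_SIZE];
        s32 disp = (s32)((long)new_func - ((long)ip + CALL_INSN_SIZE));

        insn[0] = 0xe8;                         /* opcode: call rel32 */
        memcpy(&insn[1], &disp, sizeof(disp));  /* 32-bit displacement */

        text_poke_bp(ip, insn, CALL_INSN_SIZE, handler);
}
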
+#ifndef CONFIG_UML_X86
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+        regs->ip = ip;
+}
+
+#define INT3_INSN_SIZE 1
+#define CALL_INSN_SIZE 5
+
+#ifdef CONFIG_X86_64
+static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+{
+        /*
+         * The int3 handler in entry_64.S adds a gap between the
+         * stack where the break point happened, and the saving of
+         * pt_regs. We can extend the original stack because of
+         * this gap. See the idtentry macro's create_gap option.
+         */
+        regs->sp -= sizeof(unsigned long);
+        *(unsigned long *)regs->sp = val;
+}
+
+static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+{
+        int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+        int3_emulate_jmp(regs, func);
+}
+#endif /* CONFIG_X86_64 */
+#endif /* !CONFIG_UML_X86 */
+
#endif /* _ASM_X86_TEXT_PATCHING_H */
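
As a usage sketch (assumed for illustration, not the actual ftrace code), an
x86_64 int3 notifier could use the helpers above to emulate the call that is
being live-patched; example_int3_handler() and target are hypothetical names.

/*
 * Illustrative only: redirect execution from a patched call site while
 * the patch is in flight (x86_64, where int3_emulate_call() exists).
 * regs->ip points just past the int3 byte; int3_emulate_call() pushes
 * the return address of the original CALL_INSN_SIZE call (using the
 * stack gap the entry code reserved via create_gap=1) and jumps to
 * @target.
 */
static int example_int3_handler(struct pt_regs *regs, unsigned long target)
{
        if (user_mode(regs))
                return 0;       /* not a kernel text poke; not ours */

        int3_emulate_call(regs, target);
        return 1;               /* handled; execution resumes at @target */
}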