fpu_save_csr a0 t1
fpu_save_double a0 t1 # clobbers t1
fpu_save_cc a0 t1 t2 # clobbers t1, t2
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(_save_fp)
EXPORT_SYMBOL(_save_fp)
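The change is semantically a no-op: `jr rj` is the assembler's canonical alias for `jirl zero, rj, 0`, an indirect jump whose link value is discarded by writing it to `zero`, and both spellings assemble to the same encoding. A minimal sketch of the equivalence, with the linking form shown only for contrast:

    jirl    zero, ra, 0    # raw form: jump to ra, link register is zero (no link)
    jr      ra             # alias: assembles to exactly the instruction above
    jirl    ra, t0, 0      # contrast: indirect call through t0, links return PC into ra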
fpu_restore_double a0 t1 # clobbers t1
fpu_restore_csr a0 t1
fpu_restore_cc a0 t1 t2 # clobbers t1, t2
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(_restore_fp)
/*
movgr2fr.d $f30, t1
movgr2fr.d $f31, t1
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(_init_fpu)
/*
sc_save_fcsr a2 t1
sc_save_fp a0
li.w a0, 0 # success
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(_save_fp_context)
/*
sc_restore_fcc a1 t1 t2
sc_restore_fcsr a2 t1
li.w a0, 0 # success
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(_restore_fp_context)
SYM_FUNC_START(fault)
li.w a0, -EFAULT # failure
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(fault)
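For context, `fault` is the shared fixup target for the user-memory accesses performed by the sc_save_*/sc_restore_* macros above: each access carries an exception-table entry, so a bad user pointer diverts here and the function returns -EFAULT instead of oopsing. A minimal sketch of that pattern, with an assumed label and a simplified entry format (the file's real macros and the kernel's extable layout differ):

.Lacc:
    fst.d   $f0, a0, 0     # user access that may fault
    .section __ex_table, "a"
    .dword  .Lacc, fault   # "on a fault at .Lacc, resume at fault" (entry format assumed)
    .previous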
nop
idle 0
/* end of rollback region */
-1: jirl zero, ra, 0
+1: jr ra
SYM_FUNC_END(__arch_cpu_idle)
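The `1:` label marks the first instruction past the rollback region: if an interrupt is taken inside the region, the interrupt entry path can restart `__arch_cpu_idle` from its first instruction rather than resuming mid-sequence, so the pending-work check is re-run before idling again. A rough sketch of such a rollback test (the names, region size, and placement are assumptions, not the kernel's actual handler):

    la.abs  t0, __arch_cpu_idle    # start of the rollback region
    csrrd   t1, LOONGARCH_CSR_ERA  # PC at which the interrupt hit
    sub.d   t2, t1, t0
    li.w    t3, 32                 # assumed region size in bytes
    bgeu    t2, t3, 1f             # outside the region: leave ERA alone
    csrwr   t0, LOONGARCH_CSR_ERA  # inside: rewind ERA to the region start
1: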
SYM_FUNC_START(handle_vint)
SYM_FUNC_START(handle_sys)
la.abs t0, handle_syscall
- jirl zero, t0, 0
+ jr t0
SYM_FUNC_END(handle_sys)
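The `la.abs` plus `jr` pair is a long-range jump: a direct `b handle_syscall` would be PC-relative with a ±128MB reach, and the exception vector is not guaranteed to sit that close to the target, so the full 64-bit link-time address is materialized first. A sketch of the usual four-instruction expansion (operator spellings follow newer binutils; older toolchains expand la.abs internally):

    lu12i.w  t0, %abs_hi20(handle_syscall)        # bits 31..12
    ori      t0, t0, %abs_lo12(handle_syscall)    # bits 11..0
    lu32i.d  t0, %abs64_lo20(handle_syscall)      # bits 51..32
    lu52i.d  t0, t0, %abs64_hi12(handle_syscall)  # bits 63..52
    jr       t0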
/* We might not get launched at the address the kernel is linked to,
so we jump there. */
la.abs t0, 0f
- jirl zero, t0, 0
+ jr t0
0:
la t0, __bss_start # clear .bss
st.d zero, t0, 0
ld.d tp, t0, CPU_BOOT_TINFO
la.abs t0, 0f
- jirl zero, t0, 0
+ jr t0
0:
bl start_secondary
SYM_CODE_END(smpboot_entry)
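Both entry paths above use the same relocation trick: `la.abs t0, 0f` loads the link-time (virtual) address of the local label, so `jr t0` hops from wherever the loader actually placed the code to the address the kernel was linked at; a plain `b 0f` would be PC-relative and would keep executing at the load address. A minimal standalone sketch of the idiom:

    # Running at an unknown load address; t0 receives the *linked* address of 0f.
    la.abs  t0, 0f
    jr      t0      # from here on, PC matches the link-time layout
0:
    # PC-relative code and symbol accesses are now consistent with the link map.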
st.d zero, a0, -8
bne t0, a0, 1b
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
st.d t7, a0, -8
bne t8, a0, 1b
- jirl zero, ra, 0
+ jr ra
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
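clear_page and copy_page end with the same loop idiom: the pointer is advanced first and the stores go through negative offsets, so the unrolled body needs no separate counter and `bne t0, a0, 1b` simply compares against a precomputed end pointer. A minimal non-unrolled sketch of the idiom (a1 as a hypothetical byte count; the kernel versions are unrolled with the size fixed at PAGE_SIZE):

    add.d   t0, a0, a1      # t0 = end = base + size
1:
    addi.d  a0, a0, 8       # advance the pointer first...
    st.d    zero, a0, -8    # ...then store just behind it
    bne     t0, a0, 1b      # loop until a0 reaches the end
    jr      ra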
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_0
- jirl zero, t0, 0
+ jr t0
SYM_FUNC_END(handle_tlb_load)
SYM_FUNC_START(handle_tlb_store)
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1
- jirl zero, t0, 0
+ jr t0
SYM_FUNC_END(handle_tlb_store)
SYM_FUNC_START(handle_tlb_modify)
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1
- jirl zero, t0, 0
+ jr t0
SYM_FUNC_END(handle_tlb_modify)
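In all three handlers, `csrrd ra, EXCEPTION_KS2` recovers a return address that the entry path stashed in a KScratch CSR (general registers are not yet saved this early on a TLB exception), and the `la.abs` plus `jr` tail-jump reaches the common C fault path from the vector regardless of distance. A sketch of the stash/restore pairing this assumes (with EXCEPTION_KS2 naming a KScratch CSR):

    csrwr   ra, EXCEPTION_KS2    # entry: stash ra (csrwr swaps the old CSR value into ra)
    # ... fast-path TLB handling, free to clobber ra ...
    csrrd   ra, EXCEPTION_KS2    # exit: recover ra before tail-jumping to C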
SYM_FUNC_START(handle_tlb_refill)