LoongArch: Simplify "BEQ/BNE foo, zero" with BEQZ/BNEZ
author    WANG Xuerui <git@xen0n.name>
          Tue, 26 Jul 2022 15:57:19 +0000 (23:57 +0800)
committer Huacai Chen <chenhuacai@loongson.cn>
          Fri, 29 Jul 2022 10:22:32 +0000 (18:22 +0800)
While B{EQ,NE}Z and B{EQ,NE} are different instructions, and the vastly
expanded branch range does not really matter in the few cases touched,
use B{EQ,NE}Z where possible for shorter lines and better consistency
(e.g. some places used "BEQ foo, zero", while others used "BEQ zero, foo").

Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
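
For illustration only (not part of this patch): BEQZ/BNEZ test a single
register against zero and carry a 21-bit branch offset, whereas BEQ/BNE
compare two registers with only a 16-bit offset, which is the "vastly
expanded range" mentioned above. A minimal sketch of the LL/SC retry idiom
this patch touches, using a hypothetical demo_add_return() helper (not the
kernel's actual arch_atomic_add_return(), which uses AMADD upstream):

/*
 * Hypothetical sketch: SC.W writes 1 to its register on success and 0 on
 * failure, so "beqz %0, 1b" (formerly spelled "beq $zero, %0, 1b") loops
 * until the store-conditional succeeds.
 */
static inline int demo_add_return(int i, int *v)
{
	int result, temp;

	__asm__ __volatile__(
	"1:	ll.w	%1, %2			\n"	/* temp = *v (load-linked)  */
	"	add.w	%0, %1, %3		\n"	/* result = temp + i        */
	"	sc.w	%0, %2			\n"	/* try to store; 0 = failed */
	"	beqz	%0, 1b			\n"	/* retry on SC failure      */
	"	add.w	%0, %1, %3		\n"	/* recompute the new value  */
	: "=&r" (result), "=&r" (temp), "+ZB" (*v)
	: "r" (i)
	: "memory");

	return result;
}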
arch/loongarch/include/asm/atomic.h
arch/loongarch/include/asm/cmpxchg.h
arch/loongarch/include/asm/futex.h
arch/loongarch/mm/tlbex.S

diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index a0a33ee793d6a83e47783df505ede360515a0859..0869bec2c937cacbc523dcf17e0451c1abb7e4e3 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -160,7 +160,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
                "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.w    %1, %2                                  \n"
-               "       beq     $zero, %1, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp),
@@ -173,7 +173,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
                "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.w    %1, %2                                  \n"
-               "       beq     $zero, %1, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp),
@@ -323,7 +323,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
                "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.d    %1, %2                                  \n"
-               "       beq     %1, $zero, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp),
@@ -336,7 +336,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
                "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.d    %1, %2                                  \n"
-               "       beq     %1, $zero, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp),
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 9e99391964719b6552c27b82fdb1841c5a1229b0..0a9b0fac1eeeb6115bfcd444d35b1975f45a1277 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -57,7 +57,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
        "       bne     %0, %z3, 2f                     \n"             \
        "       move    $t0, %z4                        \n"             \
        "       " st "  $t0, %1                         \n"             \
-       "       beq     $zero, $t0, 1b                  \n"             \
+       "       beqz    $t0, 1b                         \n"             \
        "2:                                             \n"             \
        __WEAK_LLSC_MB                                                  \
        : "=&r" (__ret), "=ZB"(*m)                                      \
diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h
index 170ec9f97e583bddd68ec36b731a4bc22e78c744..837659335fb1510da7c6aaee87a9fc7877b9f70c 100644
--- a/arch/loongarch/include/asm/futex.h
+++ b/arch/loongarch/include/asm/futex.h
@@ -17,7 +17,7 @@
        "1:     ll.w    %1, %4 # __futex_atomic_op\n"           \
        "       " insn  "                               \n"     \
        "2:     sc.w    $t0, %2                         \n"     \
-       "       beq     $t0, $zero, 1b                  \n"     \
+       "       beqz    $t0, 1b                         \n"     \
        "3:                                             \n"     \
        "       .section .fixup,\"ax\"                  \n"     \
        "4:     li.w    %0, %6                          \n"     \
@@ -84,7 +84,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
        "       bne     %1, %z4, 3f                             \n"
        "       move    $t0, %z5                                \n"
        "2:     sc.w    $t0, %2                                 \n"
-       "       beq     $zero, $t0, 1b                          \n"
+       "       beqz    $t0, 1b                                 \n"
        "3:                                                     \n"
        __WEAK_LLSC_MB
        "       .section .fixup,\"ax\"                          \n"
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index f1234a9c311f5fbd4bfdabbb8727820d93966f19..4d16e27020e01462ac73283638130fc28f66af76 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -80,7 +80,7 @@ vmalloc_done_load:
         * see if we need to jump to huge tlb processing.
         */
        andi    t0, ra, _PAGE_HUGE
-       bne     t0, zero, tlb_huge_update_load
+       bnez    t0, tlb_huge_update_load
 
        csrrd   t0, LOONGARCH_CSR_BADV
        srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -100,12 +100,12 @@ smp_pgtable_change_load:
 
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, 1
-       beq     ra, zero, nopage_tlb_load
+       beqz    ra, nopage_tlb_load
 
        ori     t0, t0, _PAGE_VALID
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, zero, smp_pgtable_change_load
+       beqz    t0, smp_pgtable_change_load
 #else
        st.d    t0, t1, 0
 #endif
@@ -139,13 +139,13 @@ tlb_huge_update_load:
 #endif
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, 1
-       beq     ra, zero, nopage_tlb_load
+       beqz    ra, nopage_tlb_load
        tlbsrch
 
        ori     t0, t0, _PAGE_VALID
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, zero, tlb_huge_update_load
+       beqz    t0, tlb_huge_update_load
        ld.d    t0, t1, 0
 #else
        st.d    t0, t1, 0
@@ -244,7 +244,7 @@ vmalloc_done_store:
         * see if we need to jump to huge tlb processing.
         */
        andi    t0, ra, _PAGE_HUGE
-       bne     t0, zero, tlb_huge_update_store
+       bnez    t0, tlb_huge_update_store
 
        csrrd   t0, LOONGARCH_CSR_BADV
        srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -265,12 +265,12 @@ smp_pgtable_change_store:
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
        xori    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-       bne     ra, zero, nopage_tlb_store
+       bnez    ra, nopage_tlb_store
 
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, zero, smp_pgtable_change_store
+       beqz    t0, smp_pgtable_change_store
 #else
        st.d    t0, t1, 0
 #endif
@@ -306,14 +306,14 @@ tlb_huge_update_store:
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
        xori    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-       bne     ra, zero, nopage_tlb_store
+       bnez    ra, nopage_tlb_store
 
        tlbsrch
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, zero, tlb_huge_update_store
+       beqz    t0, tlb_huge_update_store
        ld.d    t0, t1, 0
 #else
        st.d    t0, t1, 0
@@ -411,7 +411,7 @@ vmalloc_done_modify:
         * see if we need to jump to huge tlb processing.
         */
        andi    t0, ra, _PAGE_HUGE
-       bne     t0, zero, tlb_huge_update_modify
+       bnez    t0, tlb_huge_update_modify
 
        csrrd   t0, LOONGARCH_CSR_BADV
        srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -431,12 +431,12 @@ smp_pgtable_change_modify:
 
        srli.d  ra, t0, _PAGE_WRITE_SHIFT
        andi    ra, ra, 1
-       beq     ra, zero, nopage_tlb_modify
+       beqz    ra, nopage_tlb_modify
 
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, zero, smp_pgtable_change_modify
+       beqz    t0, smp_pgtable_change_modify
 #else
        st.d    t0, t1, 0
 #endif
@@ -471,14 +471,14 @@ tlb_huge_update_modify:
 
        srli.d  ra, t0, _PAGE_WRITE_SHIFT
        andi    ra, ra, 1
-       beq     ra, zero, nopage_tlb_modify
+       beqz    ra, nopage_tlb_modify
 
        tlbsrch
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, zero, tlb_huge_update_modify
+       beqz    t0, tlb_huge_update_modify
        ld.d    t0, t1, 0
 #else
        st.d    t0, t1, 0