author     WANG Xuerui <git@xen0n.name>           2022-07-26 23:57:18 +0800
committer  Huacai Chen <chenhuacai@loongson.cn>   2022-07-29 18:22:32 +0800
commit     57ce5d3eefacfaadfe2ed0a3a85713d1ae6287b9 (patch)
tree       627915b42bd823231e06585855e74f4ccd6688cc /arch/loongarch/include
parent     07b480695d24d1c9f27bb60fd4b980ae87e8bc1e (diff)
LoongArch: Use the "move" pseudo-instruction where applicable
Some of the assembly code in the LoongArch port likely originated from a time when the assembler did not support pseudo-instructions like "move" or "jr", so the desugared form was used, and readability suffers (to a minor degree) as a result. As the upstream toolchain has supported these pseudo-instructions from the beginning, migrate the existing few usages to them for better readability.

Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
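For illustration (not part of the commit), each of these pseudo-instructions is a plain assembler alias for an instruction that uses the hard-wired $zero register, so the two spellings in each pair below assemble to the same encoding; the register names are arbitrary examples:

    move  $a1, $a0          # pseudo-instruction: copy a0 into a1
    or    $a1, $a0, $zero   # what the assembler emits: a0 | 0 == a0

    jr    $ra               # pseudo-instruction: jump to the address in ra
    jirl  $zero, $ra, 0     # what the assembler emits: indirect jump, link result discarded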
Diffstat (limited to 'arch/loongarch/include')
-rw-r--r--  arch/loongarch/include/asm/atomic.h   | 8 ++++----
-rw-r--r--  arch/loongarch/include/asm/cmpxchg.h  | 2 +-
-rw-r--r--  arch/loongarch/include/asm/futex.h    | 2 +-
-rw-r--r--  arch/loongarch/include/asm/uaccess.h  | 2 +-
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index 979367ad4e2c..a0a33ee793d6 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -157,7 +157,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
__asm__ __volatile__(
"1: ll.w %1, %2 # atomic_sub_if_positive\n"
" addi.w %0, %1, %3 \n"
- " or %1, %0, $zero \n"
+ " move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
@@ -170,7 +170,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
__asm__ __volatile__(
"1: ll.w %1, %2 # atomic_sub_if_positive\n"
" sub.w %0, %1, %3 \n"
- " or %1, %0, $zero \n"
+ " move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
@@ -320,7 +320,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
__asm__ __volatile__(
"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
" addi.d %0, %1, %3 \n"
- " or %1, %0, $zero \n"
+ " move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
@@ -333,7 +333,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
__asm__ __volatile__(
"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
" sub.d %0, %1, %3 \n"
- " or %1, %0, $zero \n"
+ " move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 75b3a4478652..9e9939196471 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -55,7 +55,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
__asm__ __volatile__( \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
- " or $t0, %z4, $zero \n" \
+ " move $t0, %z4 \n" \
" " st " $t0, %1 \n" \
" beq $zero, $t0, 1b \n" \
"2: \n" \
diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h
index 9de8231694ec..170ec9f97e58 100644
--- a/arch/loongarch/include/asm/futex.h
+++ b/arch/loongarch/include/asm/futex.h
@@ -82,7 +82,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
"# futex_atomic_cmpxchg_inatomic \n"
"1: ll.w %1, %3 \n"
" bne %1, %z4, 3f \n"
- " or $t0, %z5, $zero \n"
+ " move $t0, %z5 \n"
"2: sc.w $t0, %2 \n"
" beq $zero, $t0, 1b \n"
"3: \n"
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 42da43211765..2b44edc604a2 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -162,7 +162,7 @@ do { \
"2: \n" \
" .section .fixup,\"ax\" \n" \
"3: li.w %0, %3 \n" \
- " or %1, $zero, $zero \n" \
+ " move %1, $zero \n" \
" b 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \