author     WANG Xuerui <git@xen0n.name>          2022-07-26 23:57:16 +0800
committer  Huacai Chen <chenhuacai@loongson.cn>  2022-07-29 18:22:32 +0800
commit     d8e7f201a4cf148c3801cdc9603963061d28d64f (patch)
tree       5bbe5881044a3dcb4ad37fc59a49f9c9e815c3db
parent     e0dccc3b76fb35bb257b4118367a883073d7390e (diff)
LoongArch: Use ABI names of registers where appropriate
Some of the assembly in the LoongArch port seems to come from a prehistoric time, when the assembler didn't even have support for the ABI names we have all come to know and love, and thus used raw register numbers, which hampered readability. The usages are found with a regex match inside arch/loongarch, then manually adjusted for those non-definitions.

Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
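For readers unfamiliar with the LoongArch ABI, the renamings below follow the standard register aliases: $r0 is $zero (the hard-wired zero register), $r1 is $ra (return address), $r2 is $tp (thread pointer), and $r3 is $sp (stack pointer). A minimal sketch of the equivalence (a hypothetical standalone snippet, not taken from this patch; both spellings assemble to the identical encoding):

    /* raw register number: legal, but opaque */
    sub.d   $a0, $r0, $a0
    /* ABI name: the same instruction, now self-documenting */
    sub.d   $a0, $zero, $a0

The usages themselves could be located with something like grep -rEn '\$r[0-9]+' arch/loongarch/ (an assumed reconstruction; the exact pattern the author used is not recorded in the commit message).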
-rw-r--r--  arch/loongarch/include/asm/barrier.h       4
-rw-r--r--  arch/loongarch/include/asm/loongson.h      4
-rw-r--r--  arch/loongarch/include/asm/stacktrace.h   12
-rw-r--r--  arch/loongarch/include/asm/thread_info.h   4
-rw-r--r--  arch/loongarch/include/asm/uaccess.h       2
-rw-r--r--  arch/loongarch/mm/page.S                   4
-rw-r--r--  arch/loongarch/mm/tlbex.S                 80
7 files changed, 55 insertions, 55 deletions
diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h
index b6517eeeb141d..cda9776758544 100644
--- a/arch/loongarch/include/asm/barrier.h
+++ b/arch/loongarch/include/asm/barrier.h
@@ -48,9 +48,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
__asm__ __volatile__(
"sltu %0, %1, %2\n\t"
#if (__SIZEOF_LONG__ == 4)
- "sub.w %0, $r0, %0\n\t"
+ "sub.w %0, $zero, %0\n\t"
#elif (__SIZEOF_LONG__ == 8)
- "sub.d %0, $r0, %0\n\t"
+ "sub.d %0, $zero, %0\n\t"
#endif
: "=r" (mask)
: "r" (index), "r" (size)
diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h
index 6a8038725ba77..8522afafc24e0 100644
--- a/arch/loongarch/include/asm/loongson.h
+++ b/arch/loongarch/include/asm/loongson.h
@@ -58,7 +58,7 @@ static inline void xconf_writel(u32 val, volatile void __iomem *addr)
{
asm volatile (
" st.w %[v], %[hw], 0 \n"
- " ld.b $r0, %[hw], 0 \n"
+ " ld.b $zero, %[hw], 0 \n"
:
: [hw] "r" (addr), [v] "r" (val)
);
@@ -68,7 +68,7 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
{
asm volatile (
" st.d %[v], %[hw], 0 \n"
- " ld.b $r0, %[hw], 0 \n"
+ " ld.b $zero, %[hw], 0 \n"
:
: [hw] "r" (addr), [v] "r" (val64)
);
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index 26483e396ad12..6b5c2a7aa7066 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -23,13 +23,13 @@
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
- /* Save $r1 */
+ /* Save $ra */
STORE_ONE_REG(1)
- /* Use $r1 to save PC */
- "pcaddi $r1, 0\n\t"
- STR_LONG_S " $r1, %0\n\t"
- /* Restore $r1 */
- STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+ /* Use $ra to save PC */
+ "pcaddi $ra, 0\n\t"
+ STR_LONG_S " $ra, %0\n\t"
+ /* Restore $ra */
+ STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t"
STORE_ONE_REG(2)
STORE_ONE_REG(3)
STORE_ONE_REG(4)
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
index 99beb11c2fa80..b7dd9f19a5a9c 100644
--- a/arch/loongarch/include/asm/thread_info.h
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -44,14 +44,14 @@ struct thread_info {
}
/* How to get the thread information struct from C. */
-register struct thread_info *__current_thread_info __asm__("$r2");
+register struct thread_info *__current_thread_info __asm__("$tp");
static inline struct thread_info *current_thread_info(void)
{
return __current_thread_info;
}
-register unsigned long current_stack_pointer __asm__("$r3");
+register unsigned long current_stack_pointer __asm__("$sp");
#endif /* !__ASSEMBLY__ */
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 217c6a3727b12..42da432117658 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -162,7 +162,7 @@ do { \
"2: \n" \
" .section .fixup,\"ax\" \n" \
"3: li.w %0, %3 \n" \
- " or %1, $r0, $r0 \n" \
+ " or %1, $zero, $zero \n" \
" b 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S
index ddc78ab33c7b1..270d509adbaaa 100644
--- a/arch/loongarch/mm/page.S
+++ b/arch/loongarch/mm/page.S
@@ -32,7 +32,7 @@ SYM_FUNC_START(clear_page)
st.d zero, a0, -8
bne t0, a0, 1b
- jirl $r0, ra, 0
+ jirl zero, ra, 0
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
@@ -79,6 +79,6 @@ SYM_FUNC_START(copy_page)
st.d t7, a0, -8
bne t8, a0, 1b
- jirl $r0, ra, 0
+ jirl zero, ra, 0
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index 7eee402715774..9e98afe7a67fa 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -47,7 +47,7 @@ SYM_FUNC_START(handle_tlb_load)
* The vmalloc handling is not in the hotpath.
*/
csrrd t0, LOONGARCH_CSR_BADV
- blt t0, $r0, vmalloc_load
+ blt t0, zero, vmalloc_load
csrrd t1, LOONGARCH_CSR_PGDL
vmalloc_done_load:
@@ -80,7 +80,7 @@ vmalloc_done_load:
* see if we need to jump to huge tlb processing.
*/
andi t0, ra, _PAGE_HUGE
- bne t0, $r0, tlb_huge_update_load
+ bne t0, zero, tlb_huge_update_load
csrrd t0, LOONGARCH_CSR_BADV
srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -100,12 +100,12 @@ smp_pgtable_change_load:
srli.d ra, t0, _PAGE_PRESENT_SHIFT
andi ra, ra, 1
- beq ra, $r0, nopage_tlb_load
+ beq ra, zero, nopage_tlb_load
ori t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
sc.d t0, t1, 0
- beq t0, $r0, smp_pgtable_change_load
+ beq t0, zero, smp_pgtable_change_load
#else
st.d t0, t1, 0
#endif
@@ -139,23 +139,23 @@ tlb_huge_update_load:
#endif
srli.d ra, t0, _PAGE_PRESENT_SHIFT
andi ra, ra, 1
- beq ra, $r0, nopage_tlb_load
+ beq ra, zero, nopage_tlb_load
tlbsrch
ori t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
sc.d t0, t1, 0
- beq t0, $r0, tlb_huge_update_load
+ beq t0, zero, tlb_huge_update_load
ld.d t0, t1, 0
#else
st.d t0, t1, 0
#endif
- addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+ addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16)
addi.d ra, t1, 0
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
tlbwr
- csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+ csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
/*
* A huge PTE describes an area the size of the
@@ -178,27 +178,27 @@ tlb_huge_update_load:
addi.d t0, ra, 0
/* Convert to entrylo1 */
- addi.d t1, $r0, 1
+ addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
- addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
nopage_tlb_load:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_0
- jirl $r0, t0, 0
+ jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_load)
SYM_FUNC_START(handle_tlb_store)
@@ -210,7 +210,7 @@ SYM_FUNC_START(handle_tlb_store)
* The vmalloc handling is not in the hotpath.
*/
csrrd t0, LOONGARCH_CSR_BADV
- blt t0, $r0, vmalloc_store
+ blt t0, zero, vmalloc_store
csrrd t1, LOONGARCH_CSR_PGDL
vmalloc_done_store:
@@ -244,7 +244,7 @@ vmalloc_done_store:
* see if we need to jump to huge tlb processing.
*/
andi t0, ra, _PAGE_HUGE
- bne t0, $r0, tlb_huge_update_store
+ bne t0, zero, tlb_huge_update_store
csrrd t0, LOONGARCH_CSR_BADV
srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -265,12 +265,12 @@ smp_pgtable_change_store:
srli.d ra, t0, _PAGE_PRESENT_SHIFT
andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
- bne ra, $r0, nopage_tlb_store
+ bne ra, zero, nopage_tlb_store
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
- beq t0, $r0, smp_pgtable_change_store
+ beq t0, zero, smp_pgtable_change_store
#else
st.d t0, t1, 0
#endif
@@ -306,24 +306,24 @@ tlb_huge_update_store:
srli.d ra, t0, _PAGE_PRESENT_SHIFT
andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
- bne ra, $r0, nopage_tlb_store
+ bne ra, zero, nopage_tlb_store
tlbsrch
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
- beq t0, $r0, tlb_huge_update_store
+ beq t0, zero, tlb_huge_update_store
ld.d t0, t1, 0
#else
st.d t0, t1, 0
#endif
- addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+ addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16)
addi.d ra, t1, 0
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
tlbwr
- csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+ csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
/*
* A huge PTE describes an area the size of the
* configured huge page size. This is twice the
@@ -345,28 +345,28 @@ tlb_huge_update_store:
addi.d t0, ra, 0
/* Convert to entrylo1 */
- addi.d t1, $r0, 1
+ addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
- addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
nopage_tlb_store:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1
- jirl $r0, t0, 0
+ jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_store)
SYM_FUNC_START(handle_tlb_modify)
@@ -378,7 +378,7 @@ SYM_FUNC_START(handle_tlb_modify)
* The vmalloc handling is not in the hotpath.
*/
csrrd t0, LOONGARCH_CSR_BADV
- blt t0, $r0, vmalloc_modify
+ blt t0, zero, vmalloc_modify
csrrd t1, LOONGARCH_CSR_PGDL
vmalloc_done_modify:
@@ -411,7 +411,7 @@ vmalloc_done_modify:
* see if we need to jump to huge tlb processing.
*/
andi t0, ra, _PAGE_HUGE
- bne t0, $r0, tlb_huge_update_modify
+ bne t0, zero, tlb_huge_update_modify
csrrd t0, LOONGARCH_CSR_BADV
srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -431,12 +431,12 @@ smp_pgtable_change_modify:
srli.d ra, t0, _PAGE_WRITE_SHIFT
andi ra, ra, 1
- beq ra, $r0, nopage_tlb_modify
+ beq ra, zero, nopage_tlb_modify
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
- beq t0, $r0, smp_pgtable_change_modify
+ beq t0, zero, smp_pgtable_change_modify
#else
st.d t0, t1, 0
#endif
@@ -471,14 +471,14 @@ tlb_huge_update_modify:
srli.d ra, t0, _PAGE_WRITE_SHIFT
andi ra, ra, 1
- beq ra, $r0, nopage_tlb_modify
+ beq ra, zero, nopage_tlb_modify
tlbsrch
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
- beq t0, $r0, tlb_huge_update_modify
+ beq t0, zero, tlb_huge_update_modify
ld.d t0, t1, 0
#else
st.d t0, t1, 0
@@ -504,28 +504,28 @@ tlb_huge_update_modify:
addi.d t0, ra, 0
/* Convert to entrylo1 */
- addi.d t1, $r0, 1
+ addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbwr
/* Reset default page size */
- addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
nopage_tlb_modify:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1
- jirl $r0, t0, 0
+ jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_modify)
SYM_FUNC_START(handle_tlb_refill)