[PATCH 6/9] LoongArch: Simplify the invtlb wrappers

From: WANG Xuerui
Date: Fri Jun 23 2023 - 09:44:41 EST


From: WANG Xuerui <git@xxxxxxxxxx>

Of the 3 existing invtlb wrappers, invtlb_info is not used at all,
so remove it; invtlb_all and invtlb_addr have their unused arguments
dropped from their signatures.
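
As the tlb.c hunks below show, call sites simply drop the dummy
arguments, e.g.:

	/* before */
	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);

	/* after */
	invtlb_all(INVTLB_CURRENT_ALL);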

Also, the invtlb instruction has been supported by upstream LoongArch
toolchains from day one, so ditch the raw opcode trickery and just use
plain inline asm for it.
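
For reference, the magic 0x6498000 in the old wrappers is just the
fixed part of the invtlb encoding spelled out in the comment being
removed; a quick sanity check of that arithmetic:

	(0x1 << 26) | (0x24 << 20) | (0x13 << 15)
	= 0x4000000 | 0x2400000 | 0x98000
	= 0x6498000

With the assembler understanding the mnemonic, none of that
hand-assembly is needed anymore.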

Signed-off-by: WANG Xuerui <git@xxxxxxxxxx>
---
arch/loongarch/include/asm/tlb.h | 45 ++++++++++++--------------------
arch/loongarch/mm/tlb.c          | 10 +++----
2 files changed, 21 insertions(+), 34 deletions(-)

diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
index 0dc9ee2b05d2..5e6ee9a15f0f 100644
--- a/arch/loongarch/include/asm/tlb.h
+++ b/arch/loongarch/include/asm/tlb.h
@@ -88,52 +88,39 @@ enum invtlb_ops {
INVTLB_GID_ADDR = 0x16,
};

-/*
- * invtlb op info addr
- * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
- * (addr << 10) | (info << 5) | op
- */
static inline void invtlb(u32 op, u32 info, u64 addr)
{
__asm__ __volatile__(
- "parse_r addr,%0\n\t"
- "parse_r info,%1\n\t"
- ".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
- :
- : "r"(addr), "r"(info), "i"(op)
- :
- );
-}
-
-static inline void invtlb_addr(u32 op, u32 info, u64 addr)
-{
- __asm__ __volatile__(
- "parse_r addr,%0\n\t"
- ".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
- :
- : "r"(addr), "i"(op)
+ "invtlb %0, %1, %2\n\t"
:
+ : "i"(op), "r"(info), "r"(addr)
+ : "memory"
);
}

-static inline void invtlb_info(u32 op, u32 info, u64 addr)
+static inline void invtlb_addr(u32 op, u64 addr)
{
+ /*
+ * The ISA manual says $zero shall be used in case a particular op
+ * does not take the respective argument, hence the invtlb helper is
+ * not re-used to make sure this is the case.
+ */
__asm__ __volatile__(
- "parse_r info,%0\n\t"
- ".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
- :
- : "r"(info), "i"(op)
+ "invtlb %0, $zero, %1\n\t"
:
+ : "i"(op), "r"(addr)
+ : "memory"
);
}

-static inline void invtlb_all(u32 op, u32 info, u64 addr)
+static inline void invtlb_all(u32 op)
{
+ /* Similar to invtlb_addr, ensure the operands are actually $zero. */
__asm__ __volatile__(
- ".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
+ "invtlb %0, $zero, $zero\n\t"
:
: "i"(op)
- :
+ : "memory"
);
}

diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 00bb563e3c89..de04d2624ef4 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -17,19 +17,19 @@

void local_flush_tlb_all(void)
{
- invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
+ invtlb_all(INVTLB_CURRENT_ALL);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_user(void)
{
- invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
+ invtlb_all(INVTLB_CURRENT_GFALSE);
}
EXPORT_SYMBOL(local_flush_tlb_user);

void local_flush_tlb_kernel(void)
{
- invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
+ invtlb_all(INVTLB_CURRENT_GTRUE);
}
EXPORT_SYMBOL(local_flush_tlb_kernel);

@@ -100,7 +100,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
end &= (PAGE_MASK << 1);

while (start < end) {
- invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
+ invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, start);
start += (PAGE_SIZE << 1);
}
} else {
@@ -131,7 +131,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
void local_flush_tlb_one(unsigned long page)
{
page &= (PAGE_MASK << 1);
- invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
+ invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, page);
}

static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
--
2.40.0