From: Dylan Jhong <dylan@andestech.com>
commit 9a801afd3eb95e1a89aba17321062df06fb49d98 upstream.
Currently, we pass the CONTEXTID instead of the ASID to the TLB flush
function. We should only take the ASID field to avoid touching the
reserved bit field.
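For illustration only, a minimal user-space sketch of the problem, assuming
a hypothetical 8-bit ASID width (the real width is probed at boot and kept
in asid_bits/asid_mask in arch/riscv/mm/context.c): the allocator packs a
generation number above the ASID bits in mm->context.id, so the raw value
must be masked before it is handed to the flush call.

	#include <stdio.h>

	#define ASID_BITS 8	/* hypothetical width, for illustration only */
	#define ASID_MASK ((1UL << ASID_BITS) - 1)

	int main(void)
	{
		unsigned long version = 3;	/* allocator generation, stored above the ASID */
		unsigned long asid = 0x2a;
		unsigned long context_id = (version << ASID_BITS) | asid;

		/*
		 * Passing the raw context id would leak the generation bits
		 * into the ASID operand; masking keeps only the ASID field,
		 * which is what the fix below does.
		 */
		printf("raw context id: 0x%lx\n", context_id);			/* 0x32a */
		printf("masked ASID:    0x%lx\n", context_id & ASID_MASK);	/* 0x2a  */
		return 0;
	}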
Fixes: 3f1e782998cd ("riscv: add ASID-based tlbflushing methods")
Signed-off-by: Dylan Jhong <dylan@andestech.com>
Reviewed-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Link: https://lore.kernel.org/r/20230313034906.2401730-1-dylan@andestech.com
Cc: stable@vger.kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/riscv/include/asm/tlbflush.h | 2 ++
 arch/riscv/mm/context.c           | 2 +-
 arch/riscv/mm/tlbflush.c          | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -12,6 +12,8 @@
 #include <asm/errata_list.h>
 
 #ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocat
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
-static unsigned long asid_mask;
+unsigned long asid_mask;
 
 static atomic_long_t current_version;
 
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -42,7 +42,7 @@ static void __sbi_tlb_flush_range(struct
 	/* check if the tlbflush needs to be sent to other CPUs */
 	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
 	if (static_branch_unlikely(&use_asid_allocator)) {
-		unsigned long asid = atomic_long_read(&mm->context.id);
+		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);