Revert "riscv: mm: notify remote harts about mmu cache updates"
author Sergey Matyukevich <sergey.matyukevich@syntacore.com>
	Sun, 26 Feb 2023 15:01:36 +0000 (18:01 +0300)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Wed, 22 Mar 2023 12:31:33 +0000 (13:31 +0100)
commit e921050022f1f12d5029d1487a7dfc46cde15523 upstream.

This reverts the remaining bits of commit 4bd1d80efb5a ("riscv: mm:
notify remote harts about mmu cache updates").

According to bug reports, the suggested approach to fix stale TLB entries
is not sufficient. It needs to be replaced by a more robust solution.

Fixes: 4bd1d80efb5a ("riscv: mm: notify remote harts about mmu cache updates")
Reported-by: Zong Li <zong.li@sifive.com>
Reported-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Cc: stable@vger.kernel.org
Reviewed-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20230226150137.1919750-2-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
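
For context, the reverted scheme worked as follows (a condensed sketch built
from the hunks below, not the exact kernel code): the flush path marked every
hart that was not currently running the mm as having a stale TLB, and the
context-switch path performed the deferred local flush once the mm migrated
to a marked hart. All identifiers come from the diff; the surrounding control
flow is simplified.

        /* flush side (__sbi_tlb_flush_range): mark absent harts as stale */
        cpumask_setall(&mm->context.tlb_stale_mask);
        cpumask_clear_cpu(cpuid, &mm->context.tlb_stale_mask);
        cpumask_andnot(&mm->context.tlb_stale_mask,
                       &mm->context.tlb_stale_mask, mm_cpumask(mm));

        /* switch side (set_mm_asid): catch up when the mm arrives here */
        if (cpumask_test_cpu(cpu, &mm->context.tlb_stale_mask)) {
                cpumask_clear_cpu(cpu, &mm->context.tlb_stale_mask);
                local_flush_tlb_all_asid(cntx & asid_mask);
        }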
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/context.c
arch/riscv/mm/tlbflush.c

index 5ff1f19fd45c29b4fc7d2c8b44ac4984caf1e75c..0099dc1161683ddd1e3c45460309e331b7e6b0a7 100644 (file)
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
        /* A local icache flush is needed before user execution can resume. */
        cpumask_t icache_stale_mask;
-       /* A local tlb flush is needed before user execution can resume. */
-       cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;
 
index 907b9efd39a87dd3853c1f8f21f4baa2fdd1125c..801019381dea3fec53f50fe443fb350c3ab92c43 100644 (file)
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
        ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-       __asm__ __volatile__ ("sfence.vma x0, %0"
-                       :
-                       : "r" (asid)
-                       : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-               unsigned long asid)
-{
-       __asm__ __volatile__ ("sfence.vma %0, %1"
-                       :
-                       : "r" (addr), "r" (asid)
-                       : "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()                  do { } while (0)
 #define local_flush_tlb_page(addr)             do { } while (0)
index cc4a47bda82a0869aeb54f5065a75295ccf605b9..ee3459cb6750b40f12ecc9e78aa9d72b4fdecba1 100644 (file)
@@ -196,16 +196,6 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
        if (need_flush_tlb)
                local_flush_tlb_all();
-#ifdef CONFIG_SMP
-       else {
-               cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-               if (cpumask_test_cpu(cpu, mask)) {
-                       cpumask_clear_cpu(cpu, mask);
-                       local_flush_tlb_all_asid(cntx & asid_mask);
-               }
-       }
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
index efefc3986c48cd727b32d62c5bd9ffbaaf67f09b..64f8201237c24e736d18942d27b295b580efad61 100644 (file)
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma x0, %0"
+                       :
+                       : "r" (asid)
+                       : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+               unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma %0, %1"
+                       :
+                       : "r" (addr), "r" (asid)
+                       : "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
                                  unsigned long size, unsigned long stride)
 {
-       struct cpumask *pmask = &mm->context.tlb_stale_mask;
        struct cpumask *cmask = mm_cpumask(mm);
        struct cpumask hmask;
        unsigned int cpuid;
@@ -30,15 +45,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
        if (static_branch_unlikely(&use_asid_allocator)) {
                unsigned long asid = atomic_long_read(&mm->context.id);
 
-               /*
-                * TLB will be immediately flushed on harts concurrently
-                * executing this MM context. TLB flush on other harts
-                * is deferred until this MM context migrates there.
-                */
-               cpumask_setall(pmask);
-               cpumask_clear_cpu(cpuid, pmask);
-               cpumask_andnot(pmask, pmask, cmask);
-
                if (broadcast) {
                        riscv_cpuid_to_hartid_mask(cmask, &hmask);
                        sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
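
With the revert applied, nothing is deferred: __sbi_tlb_flush_range broadcasts
the ASID-tagged flush immediately over SBI to the harts in mm_cpumask(mm), and
the sfence.vma helpers move back into tlbflush.c as private functions, as the
hunks above show. For reference, sfence.vma takes a virtual address in rs1 and
an ASID in rs2; x0 in rs1 means "all addresses" for that ASID, which is why
local_flush_tlb_all_asid passes x0. A hedged usage sketch (vaddr and the PTE
update are hypothetical; asid_mask and mm->context.id appear in the diff):

        /* after updating a user PTE on this mm, drop the stale local
         * translation for that page under the mm's current ASID */
        unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;

        local_flush_tlb_page_asid(vaddr, asid);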