riscv: Improve tlb_flush()
author     Alexandre Ghiti <alexghiti@rivosinc.com>
           Mon, 30 Oct 2023 13:30:25 +0000 (14:30 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 16 Feb 2024 18:10:52 +0000 (19:10 +0100)
[ Upstream commit c5e9b2c2ae82231d85d9650854e7b3e97dde33da ]

For now, tlb_flush() simply calls flush_tlb_mm(), which results in a
flush of the whole TLB. So let's use the mmu_gather fields to provide a
more fine-grained flush of the TLB.

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # On RZ/Five SMARC
Link: https://lore.kernel.org/r/20231030133027.19542-2-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Stable-dep-of: d9807d60c145 ("riscv: mm: execute local TLB flush after populating vmemmap")
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/riscv/include/asm/tlb.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/tlbflush.c
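
At a glance, the new tlb_flush() keys off four mmu_gather fields:
fullmm and need_flush_all force a whole-mm flush, otherwise only the
[start, end) window accumulated during unmap is flushed. A minimal,
standalone illustration of that branch (plain userspace C with
hypothetical stand-in names, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the handful of struct mmu_gather
	 * fields the new tlb_flush() consults. */
	struct mmu_gather_sketch {
		bool fullmm;          /* whole address space is being torn down */
		bool need_flush_all;  /* the arch requested a full flush */
		unsigned long start;  /* lowest address unmapped so far */
		unsigned long end;    /* highest address unmapped so far */
	};

	/* Mirrors the branch added to arch/riscv/include/asm/tlb.h:
	 * full flush only when required, ranged flush otherwise. */
	static void tlb_flush_sketch(const struct mmu_gather_sketch *tlb)
	{
		if (tlb->fullmm || tlb->need_flush_all)
			printf("flush_tlb_mm: drop every entry for this mm\n");
		else
			printf("flush_tlb_mm_range: drop [%#lx, %#lx)\n",
			       tlb->start, tlb->end);
	}

	int main(void)
	{
		struct mmu_gather_sketch ranged = {
			.start = 0x400000, .end = 0x403000,
		};
		struct mmu_gather_sketch full = { .fullmm = true };

		tlb_flush_sketch(&ranged); /* ranged: three 4 KiB pages */
		tlb_flush_sketch(&full);   /* full-mm flush */
		return 0;
	}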

diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 120bcf2ed8a878554000f0d0ac73e14ec4aa6fa2..1eb5682b2af6065c9019e398df729f5b97a573c6 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+       if (tlb->fullmm || tlb->need_flush_all)
+               flush_tlb_mm(tlb->mm);
+       else
+               flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+                                  tlb_get_unmap_size(tlb));
+#endif
 }
 
 #endif /* _ASM_RISCV_TLB_H */
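
The page size handed to flush_tlb_mm_range() above comes from
tlb_get_unmap_size() in include/asm-generic/tlb.h. A sketch of the
generic helpers (guards and neighbouring code elided; consult the tree
for the exact version): the shift is derived from which page-table
levels the gather saw cleared, so tearing down a hugepage mapping
yields a hugepage-sized stride rather than hundreds of page-sized ones.

	/* Sketch of the generic helpers in include/asm-generic/tlb.h:
	 * pick the flush stride from the deepest level that was cleared. */
	static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
	{
		if (tlb->cleared_ptes)
			return PAGE_SHIFT;
		if (tlb->cleared_pmds)
			return PMD_SHIFT;
		if (tlb->cleared_puds)
			return PUD_SHIFT;
		if (tlb->cleared_p4ds)
			return P4D_SHIFT;
		return PAGE_SHIFT;
	}

	static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
	{
		return 1UL << tlb_get_unmap_shift(tlb);
	}
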
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de688ea90123bb74fc21e080cde19f22..f5c4fb0ae6424dea85575fcabba88098653d8cbd 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+                       unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end);
@@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aadc735ea9979473fd2d4dd8ffa04394e2..fa03289853d8ac93dc28473c5f2d84f947baf282 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
        __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
 }
 
+void flush_tlb_mm_range(struct mm_struct *mm,
+                       unsigned long start, unsigned long end,
+                       unsigned int page_size)
+{
+       __flush_tlb_range(mm, start, end - start, page_size);
+}
+
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
        __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
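
Note the argument convention at play here: __flush_tlb_range() takes
(mm, start, size, page_size), so the new flush_tlb_mm_range() converts
the gather's [start, end) window into a size of end - start, while
flush_tlb_mm() keeps passing (0, -1), i.e. an effectively unbounded
size meaning "flush everything". A simplified sketch of the per-CPU
stride loop a ranged flush ultimately drives (illustrative only; the
real code in arch/riscv/mm/tlbflush.c also handles ASIDs and remote
harts via SBI or IPIs):

	/* Illustrative sketch, not the kernel's exact loop: walk the
	 * [start, start + size) window one page_size stride at a time,
	 * issuing one sfence.vma per step via local_flush_tlb_page(). */
	static void local_flush_range_sketch(unsigned long start,
					     unsigned long size,
					     unsigned long page_size)
	{
		unsigned long addr;

		for (addr = start; addr < start + size; addr += page_size)
			local_flush_tlb_page(addr);
	}

With the hugepage-aware stride from tlb_get_unmap_size(), unmapping a
2 MiB PMD mapping can thus cost a single step instead of 512
page-sized ones.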