Subject: [RFC 12/20] mm/tlb: save the VMA that is flushed during tlb_start_vma()
From: Nadav Amit <namit@vmware.com>

Certain architectures need information about the vma that is about to be
flushed. Currently, an artificial vma is constructed using the original
vma's information. Instead of saving only the flags, record the vma during
tlb_start_vma() and use it when calling flush_tlb_range().

Record the vma unconditionally, as it will be needed for per-VMA
deferred TLB flush tracking; the overhead of tracking it should be
negligible.
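
For illustration only (not part of the patch): the window in which
tlb->vma is valid follows the usual mmu_gather bracketing. The sketch
below uses unmap_page_range() as a stand-in for whatever actually
clears the PTEs, with the mmu_gather signatures this series is based
on:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
	tlb_start_vma(&tlb, vma);	/* records tlb->vma = vma */
	unmap_page_range(&tlb, vma, start, end, NULL);
	tlb_end_vma(&tlb, vma);		/* may call flush_tlb_range(tlb->vma, ...),
					 * then resets tlb->vma to NULL */
	tlb_finish_mmu(&tlb, start, end);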

Signed-off-by: Nadav Amit <namit@vmware.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: x86@kernel.org
---
include/asm-generic/tlb.h | 56 +++++++++++++--------------------------
1 file changed, 19 insertions(+), 37 deletions(-)
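
As background (illustrative only, not from this patch): flush_tlb_range()
implementations on some architectures consult the vma, which is why a
real vma pointer is preferable to one reconstructed from saved flags.
Below is a hypothetical implementation in the spirit of the cases named
in the removed comment; the local_flush_* helpers are made up for this
sketch:

	static inline void flush_tlb_range(struct vm_area_struct *vma,
					   unsigned long start, unsigned long end)
	{
		/* VM_HUGETLB: only large-page TLB entries need flushing */
		if (is_vm_hugetlb_page(vma))
			local_flush_huge_tlb_range(vma->vm_mm, start, end);
		else
			local_flush_tlb_range(vma->vm_mm, start, end);

		/* VM_EXEC: the I-TLB must be invalidated as well */
		if (vma->vm_flags & VM_EXEC)
			local_flush_icache_range(start, end);
	}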

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b97136b7010b..041be2ef4426 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -252,6 +252,13 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 struct mmu_gather {
 	struct mm_struct	*mm;
 
+	/*
+	 * The current vma. It is set by tlb_start_vma() and is therefore
+	 * only valid between the tlb_start_vma() and tlb_end_vma()
+	 * calls.
+	 */
+	struct vm_area_struct	*vma;
+
 #ifdef CONFIG_MMU_GATHER_TABLE_FREE
 	struct mmu_table_batch	*batch;
 #endif
@@ -283,12 +290,6 @@ struct mmu_gather {
 	unsigned int		cleared_puds : 1;
 	unsigned int		cleared_p4ds : 1;
 
-	/*
-	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
-	 */
-	unsigned int		vma_exec : 1;
-	unsigned int		vma_huge : 1;
-
 	unsigned int		batch_count;
 
 #ifndef CONFIG_MMU_GATHER_NO_GATHER
@@ -352,10 +353,6 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	flush_tlb_mm(tlb->mm);
 }
 
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#define tlb_end_vma tlb_end_vma
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 
 #else /* CONFIG_MMU_GATHER_NO_RANGE */
@@ -364,7 +361,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
 
 /*
  * When an architecture does not provide its own tlb_flush() implementation
- * but does have a reasonably efficient flush_vma_range() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation
  * use that.
  */
 static inline void tlb_flush(struct mmu_gather *tlb)
@@ -372,38 +369,20 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	if (tlb->fullmm || tlb->need_flush_all) {
 		flush_tlb_mm(tlb->mm);
 	} else if (tlb->end) {
-		struct vm_area_struct vma = {
-			.vm_mm = tlb->mm,
-			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
-				    (tlb->vma_huge ? VM_HUGETLB : 0),
-		};
-
-		flush_tlb_range(&vma, tlb->start, tlb->end);
+		VM_BUG_ON(!tlb->vma);
+		flush_tlb_range(tlb->vma, tlb->start, tlb->end);
 	}
 }
 
 static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+tlb_update_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	/*
-	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
-	 * mips-4k) flush only large pages.
-	 *
-	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
-	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
-	 * range.
-	 *
-	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
-	 * these values the batch is empty.
-	 */
-	tlb->vma_huge = is_vm_hugetlb_page(vma);
-	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+	tlb->vma = vma;
 }
-
 #else
 
 static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+tlb_update_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 
 #endif

@@ -487,17 +466,17 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
 	if (tlb->fullmm)
 		return;
 
-	tlb_update_vma_flags(tlb, vma);
+	tlb_update_vma(tlb, vma);
 	flush_cache_range(vma, vma->vm_start, vma->vm_end);
 }
 
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
 	if (tlb->fullmm)
-		return;
+		goto out;
 
 	if (IS_ENABLED(CONFIG_ARCH_WANT_AGGRESSIVE_TLB_FLUSH_BATCHING))
-		return;
+		goto out;
 
 	/*
 	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
@@ -506,6 +485,9 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
 	 * this.
 	 */
 	tlb_flush_mmu_tlbonly(tlb);
+out:
+	/* Reset the VMA as a precaution. */
+	tlb_update_vma(tlb, NULL);
 }
 
 #ifdef CONFIG_ARCH_HAS_TLB_GENERATIONS
--
2.25.1