Subject: [PATCH 3/4] x86/mm/cpa: Fold cpa_flush_range() and cpa_flush_array()
Note that the cache flush loops in cpa_flush_range() and
cpa_flush_array() become identical once they use __cpa_addr(); the two
functions are then virtually identical except for the TLB flushing.

Fold these two functions into a single cpa_flush() call.
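
For context, after this change the address calculation helper reads
roughly like so. The CPA_ARRAY and linear cases are taken from the
first hunk below; the CPA_PAGES_ARRAY branch comes from the earlier
patch in this series and is reproduced from memory, so treat the whole
thing as a sketch:

static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[idx];

		if (unlikely(PageHighMem(page)))
			return 0;

		return (unsigned long)page_address(page);
	}

	if (cpa->flags & CPA_ARRAY)
		return cpa->vaddr[idx];

	/* Linear range: index from the single base address. */
	return *cpa->vaddr + idx * PAGE_SIZE;
}

With the linear range indexed the same way as the two array cases, one
loop over __cpa_addr(cpa, i) for i in [0, cpa->numpages) serves all
three, which is what makes the fold possible.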

One pesky detail is that __change_page_attr_set_clr() modifies @cpa,
so we have to save and restore part of it to ensure we flush the
original range; see the sketch below.
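
In change_page_attr_set_clr() that amounts to the following shape
(condensed from the diff below, with unrelated setup elided):

	unsigned long baddr = *addr;	/* remember the original start */

	...

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/* __change_page_attr_set_clr() advanced cpa.vaddr/cpa.numpages: */
	cpa.vaddr = &baddr;
	cpa.numpages = numpages;
	cpa_flush(&cpa, cache);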

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
arch/x86/mm/pageattr.c | 96 ++++++++++++++-----------------------------------
1 file changed, 29 insertions(+), 67 deletions(-)

--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -244,7 +244,7 @@ static unsigned long __cpa_addr(struct c
 	if (cpa->flags & CPA_ARRAY)
 		return cpa->vaddr[idx];
 
-	return *cpa->vaddr;
+	return *cpa->vaddr + idx * PAGE_SIZE;
 }
 
 /*
@@ -304,50 +304,6 @@ static void cpa_flush_all(unsigned long
 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static bool cpa_check_flush_all(int cache)
-{
-	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
-
-	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-		cpa_flush_all(cache);
-		return true;
-	}
-
-	return false;
-}
-
-static void cpa_flush_range(unsigned long start, int numpages, int cache)
-{
-	unsigned int i, level;
-	unsigned long addr;
-
-	WARN_ON(PAGE_ALIGN(start) != start);
-
-	if (cpa_check_flush_all(cache))
-		return;
-
-	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-
-	if (!cache)
-		return;
-
-	/*
-	 * We only need to flush on one CPU,
-	 * clflush is a MESI-coherent instruction that
-	 * will cause all other CPUs to flush the same
-	 * cachelines:
-	 */
-	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
-		pte_t *pte = lookup_address(addr, &level);
-
-		/*
-		 * Only flush present addresses:
-		 */
-		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range((void *) addr, PAGE_SIZE);
-	}
-}
-
 void __cpa_flush_array(void *data)
 {
 	struct cpa_data *cpa = data;
@@ -357,33 +313,37 @@ void __cpa_flush_array(void *data)
 		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
 }
 
-static void cpa_flush_array(struct cpa_data *cpa, int cache)
+static void cpa_flush(struct cpa_data *data, int cache)
 {
+	struct cpa_data *cpa = data;
 	unsigned int i;
 
-	if (cpa_check_flush_all(cache))
+	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
+
+	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		cpa_flush_all(cache);
 		return;
+	}
 
-	if (cpa->numpages <= tlb_single_page_flush_ceiling)
-		on_each_cpu(__cpa_flush_array, cpa, 1);
-	else
-		flush_tlb_all();
+	if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+		if (cpa->numpages <= tlb_single_page_flush_ceiling)
+			on_each_cpu(__cpa_flush_array, cpa, 1);
+		else
+			flush_tlb_all();
+	} else {
+		unsigned long start = __cpa_addr(cpa, 0);
+
+		flush_tlb_kernel_range(start, start + PAGE_SIZE * cpa->numpages);
+	}
 
 	if (!cache)
 		return;
 
-	/*
-	 * We only need to flush on one CPU,
-	 * clflush is a MESI-coherent instruction that
-	 * will cause all other CPUs to flush the same
-	 * cachelines:
-	 */
 	for (i = 0; i < cpa->numpages; i++) {
 		unsigned long addr = __cpa_addr(cpa, i);
 		unsigned int level;
-		pte_t *pte;
 
-		pte = lookup_address(addr, &level);
+		pte_t *pte = lookup_address(addr, &level);
 
 		/*
 		 * Only flush present addresses:
@@ -1695,7 +1655,7 @@ static int change_page_attr_set_clr(unsi
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
-	unsigned long baddr = 0;
+	unsigned long baddr = *addr;
 
 	memset(&cpa, 0, sizeof(cpa));
 
@@ -1781,11 +1741,11 @@ static int change_page_attr_set_clr(unsi
 		goto out;
 	}
 
-	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
-		cpa_flush_array(&cpa, cache);
-	else
-		cpa_flush_range(baddr, numpages, cache);
+	/* Reset @cpa so that we flush the original range. */
+	cpa.vaddr = &baddr;
+	cpa.numpages = numpages;
 
+	cpa_flush(&cpa, cache);
 out:
 	return ret;
 }
@@ -2071,8 +2031,8 @@ int set_memory_global(unsigned long addr
 
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 {
+	unsigned long start = addr;
 	struct cpa_data cpa;
-	unsigned long start;
 	int ret;
 
 	/* Nothing to do if memory encryption is not active */
@@ -2099,7 +2059,7 @@ static int __set_memory_enc_dec(unsigned
 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
 	 */
-	cpa_flush_range(start, numpages, 1);
+	cpa_flush(&cpa, 1);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2110,7 +2070,9 @@ static int __set_memory_enc_dec(unsigned
 	 * in case TLB flushing gets optimized in the cpa_flush_range()
 	 * path use the same logic as above.
 	 */
-	cpa_flush_range(start, numpages, 0);
+	cpa.vaddr = &start;
+	cpa.numpages = numpages;
+	cpa_flush(&cpa, 0);
 
 	return ret;
 }
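
For reviewers, applying the hunks above gives a cpa_flush() along
these lines (a reconstruction for readability, not a copy of the
tree; the comments are mine):

static void cpa_flush(struct cpa_data *data, int cache)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	/* No CLFLUSH? Fall back to WBINVD on all CPUs. */
	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
		/* Per-page invalidation, unless the range is too large. */
		if (cpa->numpages <= tlb_single_page_flush_ceiling)
			on_each_cpu(__cpa_flush_array, cpa, 1);
		else
			flush_tlb_all();
	} else {
		unsigned long start = __cpa_addr(cpa, 0);

		flush_tlb_kernel_range(start, start + PAGE_SIZE * cpa->numpages);
	}

	if (!cache)
		return;

	/*
	 * CLFLUSH is cache-coherent, so flushing on one CPU suffices;
	 * only flush present addresses.
	 */
	for (i = 0; i < cpa->numpages; i++) {
		unsigned long addr = __cpa_addr(cpa, i);
		unsigned int level;
		pte_t *pte = lookup_address(addr, &level);

		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

Note the retained heuristic: the array cases do per-page invalidation
up to tlb_single_page_flush_ceiling pages and fall back to
flush_tlb_all() beyond that, while a linear range can use
flush_tlb_kernel_range() directly.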
