From: Sean Christopherson <sean.j.christopherson@intel.com>
Subject: [PATCH 03/16] KVM: x86/mmu: Fold nonpaging_map() into nonpaging_page_fault()
Date: Fri, 6 Dec 2019
Fold nonpaging_map() into its sole caller, nonpaging_page_fault(), in
preparation for combining the bulk of nonpaging_page_fault() and
tdp_page_fault() into a common helper.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 106 +++++++++++++++++++----------------------
 1 file changed, 49 insertions(+), 57 deletions(-)
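
As a reading aid for the diff below: one of the subtler pieces of logic the
fold moves verbatim is the mmu_notifier_seq snapshot-and-recheck that guards
the pfn lookup against a racing invalidation. The following is a minimal,
self-contained sketch of that idiom, with hypothetical names and C11 atomics
standing in for the kernel's smp_rmb() and mmu_notifier_retry(); it is an
illustration of the pattern, not KVM code.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for kvm->mmu_notifier_seq, bumped by invalidations. */
	static atomic_ulong mmu_seq_toy;

	/* Stand-in for the sleepable gfn->pfn lookup (try_async_pf()). */
	static unsigned long lookup_pfn_toy(void)
	{
		return 42;
	}

	/* Returns false ("RET_PF_RETRY") if an invalidation raced us. */
	static bool map_once(void)
	{
		/*
		 * Snapshot the sequence before the lookup; a seq_cst
		 * load provides the ordering that smp_rmb() gives the
		 * real code.
		 */
		unsigned long seq = atomic_load(&mmu_seq_toy);
		unsigned long pfn = lookup_pfn_toy();

		/* In KVM this recheck runs under mmu_lock. */
		if (atomic_load(&mmu_seq_toy) != seq)
			return false;
		printf("install pfn %lu\n", pfn);
		return true;
	}

	int main(void)
	{
		while (!map_once())
			;	/* retry, as the fault handler does */
		return 0;
	}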

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e2792305ce32..8217cdd6beac 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3657,60 +3657,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
 			 bool *writable);
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-			 gfn_t gfn, bool prefault)
-{
-	int r;
-	int level;
-	bool force_pt_level;
-	kvm_pfn_t pfn;
-	unsigned long mmu_seq;
-	bool map_writable, write = error_code & PFERR_WRITE_MASK;
-	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
-				is_nx_huge_page_enabled();
-
-	force_pt_level = lpage_disallowed;
-	level = mapping_level(vcpu, gfn, &force_pt_level);
-	if (likely(!force_pt_level)) {
-		/*
-		 * This path builds a PAE pagetable - so we can map
-		 * 2mb pages at maximum. Therefore check if the level
-		 * is larger than that.
-		 */
-		if (level > PT_DIRECTORY_LEVEL)
-			level = PT_DIRECTORY_LEVEL;
-
-		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	}
-
-	if (fast_page_fault(vcpu, gpa, level, error_code))
-		return RET_PF_RETRY;
-
-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	smp_rmb();
-
-	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
-		return RET_PF_RETRY;
-
-	if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
-		return r;
-
-	r = RET_PF_RETRY;
-	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
-		goto out_unlock;
-	if (make_mmu_pages_available(vcpu) < 0)
-		goto out_unlock;
-	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
-	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
-			 prefault, false);
-out_unlock:
-	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
-	return r;
-}
-
 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 			       struct list_head *invalid_list)
 {
@@ -4172,12 +4118,21 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 				u32 error_code, bool prefault)
 {
-	gfn_t gfn = gpa >> PAGE_SHIFT;
 	int r;
+	int level;
+	kvm_pfn_t pfn;
+	unsigned long mmu_seq;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	bool write = error_code & PFERR_WRITE_MASK;
+	bool force_pt_level, map_writable;
+	bool exec = error_code & PFERR_FETCH_MASK;
+	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
 
 	/* Note, paging is disabled, ergo gva == gpa. */
 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
 
+	gpa &= PAGE_MASK;
+
 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
 		return RET_PF_EMULATE;
 
@@ -4187,9 +4142,46 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
+	force_pt_level = lpage_disallowed;
+	level = mapping_level(vcpu, gfn, &force_pt_level);
+	if (likely(!force_pt_level)) {
+		/*
+		 * This path builds a PAE pagetable - so we can map
+		 * 2mb pages at maximum. Therefore check if the level
+		 * is larger than that.
+		 */
+		if (level > PT_DIRECTORY_LEVEL)
+			level = PT_DIRECTORY_LEVEL;
 
-	return nonpaging_map(vcpu, gpa & PAGE_MASK,
-			     error_code, gfn, prefault);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	}
+
+	if (fast_page_fault(vcpu, gpa, level, error_code))
+		return RET_PF_RETRY;
+
+	mmu_seq = vcpu->kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+		return RET_PF_RETRY;
+
+	if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
+		return r;
+
+	r = RET_PF_RETRY;
+	spin_lock(&vcpu->kvm->mmu_lock);
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+		goto out_unlock;
+	if (make_mmu_pages_available(vcpu) < 0)
+		goto out_unlock;
+	if (likely(!force_pt_level))
+		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
+	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
+			 prefault, false);
+out_unlock:
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	kvm_release_pfn_clean(pfn);
+	return r;
 }
 
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
--
2.24.0