author    Sean Christopherson <sean.j.christopherson@intel.com>  2020-01-08 12:24:41 -0800
committer Paolo Bonzini <pbonzini@redhat.com>  2020-01-27 20:00:05 +0100
commit    db5432165e9b51d2a36572be38d078e79f8df0d8 (patch)
tree      a5e2c57ac774f7542e6ff637911007dec213fe0d /arch/x86/kvm/mmu
parent    17eff01904f5f2fa12f4a56666637ce69ce5c645 (diff)
KVM: x86/mmu: Walk host page tables to find THP mappings
Explicitly walk the host page tables to identify THP mappings instead of relying solely on the metadata in struct page. This sets the stage for using a common method of identifying huge mappings regardless of the underlying implementation (HugeTLB vs. THP vs. DAX), and hopefully avoids the pitfalls of relying on metadata to identify THP mappings, e.g. see commit 169226f7e0d2 ("mm: thp: handle page cache THP correctly in PageTransCompoundMap") and the need for KVM to explicitly check for a THP compound page. KVM will also naturally work with 1GB THP pages, if they are ever supported.

Walking the tables for THP mappings is likely marginally slower than querying metadata, but a future patch will reuse the walk to identify HugeTLB mappings, at which point eliminating the existing VMA lookup for HugeTLB will make this a net positive.

Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Barret Rhoden <brho@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
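The level returned by the host page-table walk can be used as the KVM mapping level directly because KVM's PT_PAGE_TABLE_LEVEL, PT_DIRECTORY_LEVEL and PT_PDPE_LEVEL share the numeric values of the host's PG_LEVEL_4K, PG_LEVEL_2M and PG_LEVEL_1G; the BUILD_BUG_ON in the patch below enforces exactly that. As a rough illustration only, not part of the patch, a hypothetical level_to_size() helper showing how an x86-64 walk level translates to a mapping size:

static unsigned long level_to_size(int level)
{
	/*
	 * Illustrative sketch, not part of the patch.  Each x86-64 paging
	 * level covers 512 times the range of the level below it:
	 * level 1 -> 4 KiB, level 2 -> 2 MiB, level 3 -> 1 GiB.
	 */
	return 4096UL << ((level - 1) * 9);
}

The min(level, max_level) in the second hunk then clamps the host-reported level to whatever the fault path allows, so e.g. a 1 GiB host mapping can still be used at 2 MiB when max_level forbids 1 GiB.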
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r--  arch/x86/kvm/mmu/mmu.c  33
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 64c28a39d8ef..3acaadb7acb8 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3329,6 +3329,34 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep);
}
+static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
+ kvm_pfn_t pfn)
+{
+ struct kvm_memory_slot *slot;
+ unsigned long hva;
+ pte_t *pte;
+ int level;
+
+ BUILD_BUG_ON(PT_PAGE_TABLE_LEVEL != (int)PG_LEVEL_4K ||
+ PT_DIRECTORY_LEVEL != (int)PG_LEVEL_2M ||
+ PT_PDPE_LEVEL != (int)PG_LEVEL_1G);
+
+ if (!PageCompound(pfn_to_page(pfn)))
+ return PT_PAGE_TABLE_LEVEL;
+
+ slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
+ if (!slot)
+ return PT_PAGE_TABLE_LEVEL;
+
+ hva = __gfn_to_hva_memslot(slot, gfn);
+
+ pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
+ if (unlikely(!pte))
+ return PT_PAGE_TABLE_LEVEL;
+
+ return level;
+}
+
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp,
int *levelp)
@@ -3344,10 +3372,11 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
kvm_is_zone_device_pfn(pfn))
return;
- if (!kvm_is_transparent_hugepage(pfn))
+ level = host_pfn_mapping_level(vcpu, gfn, pfn);
+ if (level == PT_PAGE_TABLE_LEVEL)
return;
- level = PT_DIRECTORY_LEVEL;
+ level = min(level, max_level);
/*
* mmu_notifier_retry() was successful and mmu_lock is held, so