author		Sean Christopherson <sean.j.christopherson@intel.com>	2020-07-02 19:35:29 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-07-09 13:29:38 -0400
commit		53a3f4877152fe1c1d6c499a49bf573b60f5dc41 (patch)
tree		31d66894cbe587c173a9755e6bcc522c0242439b /arch/x86/kvm/mmu
parent		284aa868688ac87d0eac7792b22b9c05f7a3cc45 (diff)
KVM: x86/mmu: Try to avoid crashing KVM if an MMU memory cache is empty
Attempt to allocate a new object instead of crashing KVM (and likely the kernel) if a memory cache is unexpectedly empty. Use GFP_ATOMIC for the allocation as the caches are used while holding mmu_lock. The immediate BUG_ON() makes the code unnecessarily explosive and led to confusing minimums being used in the past, e.g. allocating 4 objects where 1 would suffice.

Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-6-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
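The GFP_ATOMIC choice follows from the locking context: mmu_lock is a spinlock, so code holding it must not sleep, which rules out GFP_KERNEL. A minimal sketch of that constraint, using a hypothetical helper name (alloc_under_mmu_lock) and simplified locking purely for illustration, not code from the patch:

	/*
	 * Hypothetical sketch, not part of the patch: allocating while
	 * a spinlock is held requires a non-sleeping GFP.
	 */
	static void *alloc_under_mmu_lock(struct kvm *kvm,
					  struct kmem_cache *cache)
	{
		void *obj;

		spin_lock(&kvm->mmu_lock);
		/*
		 * GFP_KERNEL may sleep and would be a bug here;
		 * GFP_ATOMIC draws from reserves and never sleeps.
		 */
		obj = kmem_cache_zalloc(cache, GFP_ATOMIC | __GFP_ACCOUNT);
		spin_unlock(&kvm->mmu_lock);
		return obj;
	}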
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7fae1c395cbd..a8eddb83c6f3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1061,6 +1061,15 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
local_irq_enable();
}
+static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
+ gfp_t gfp_flags)
+{
+ if (mc->kmem_cache)
+ return kmem_cache_zalloc(mc->kmem_cache, gfp_flags);
+ else
+ return (void *)__get_free_page(gfp_flags);
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
void *obj;
@@ -1068,10 +1077,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
if (mc->nobjs >= min)
return 0;
while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
- if (mc->kmem_cache)
- obj = kmem_cache_zalloc(mc->kmem_cache, GFP_KERNEL_ACCOUNT);
- else
- obj = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+ obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
if (!obj)
return mc->nobjs >= min ? 0 : -ENOMEM;
mc->objects[mc->nobjs++] = obj;
@@ -1119,8 +1125,11 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
void *p;
- BUG_ON(!mc->nobjs);
- p = mc->objects[--mc->nobjs];
+ if (WARN_ON(!mc->nobjs))
+ p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
+ else
+ p = mc->objects[--mc->nobjs];
+ BUG_ON(!p);
return p;
}
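For reference, a sketch of how these helpers fit together in the fault path, using names from mmu.c of this era (mmu_topup_memory_caches, mmu_page_header_cache); the flow is simplified and illustrative, not verbatim kernel code:

	/* Fill the caches before taking mmu_lock; sleeping is allowed here. */
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	/*
	 * Pop a pre-allocated object.  After this patch, an unexpectedly
	 * empty cache triggers a WARN and a GFP_ATOMIC fallback instead of
	 * an immediate BUG_ON(); only a failed atomic allocation crashes.
	 */
	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	spin_unlock(&vcpu->kvm->mmu_lock);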