From 9ce372b33a2ebbd0b965148879ae169a0015d3f3 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 7 May 2020 16:36:02 +0200 Subject: KVM: x86: drop KVM_PV_REASON_PAGE_READY case from kvm_handle_page_fault() KVM guest code in Linux enables APF only when KVM_FEATURE_ASYNC_PF_INT is supported; this means we will never see KVM_PV_REASON_PAGE_READY when handling page fault vmexit in KVM. While at it, make sure we only follow the genuine page fault path when the APF reason is zero. If we happen to see something else, the underlying hypervisor is misbehaving. Add a WARN_ONCE() to catch that. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6d6a0ae7800c..3ca70554d5f1 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4156,6 +4156,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len) { int r = 1; + u32 flags = vcpu->arch.apf.host_apf_flags; #ifndef CONFIG_X86_64 /* A 64-bit CR2 should be impossible on 32-bit KVM. */ @@ -4164,28 +4165,22 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, #endif vcpu->arch.l1tf_flush_l1d = true; - switch (vcpu->arch.apf.host_apf_flags) { - default: + if (!flags) { trace_kvm_page_fault(fault_address, error_code); if (kvm_event_needs_reinjection(vcpu)) kvm_mmu_unprotect_page_virt(vcpu, fault_address); r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, insn_len); - break; - case KVM_PV_REASON_PAGE_NOT_PRESENT: + } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { vcpu->arch.apf.host_apf_flags = 0; local_irq_disable(); kvm_async_pf_task_wait_schedule(fault_address); local_irq_enable(); - break; - case KVM_PV_REASON_PAGE_READY: - vcpu->arch.apf.host_apf_flags = 0; - local_irq_disable(); - kvm_async_pf_task_wake(fault_address); - local_irq_enable(); - break; + } else { + WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags); } + return r; } EXPORT_SYMBOL_GPL(kvm_handle_page_fault); -- cgit v1.2.3 From e8c22266e68f0db2a7e11b0a9f29fd88ec0cfd4a Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 15 Jun 2020 14:13:34 +0200 Subject: KVM: async_pf: change kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type to bool Unlike normal 'int' functions returning '0' on success, kvm_setup_async_pf()/kvm_arch_setup_async_pf() return '1' when a job to handle the page fault asynchronously was scheduled and '0' otherwise. To avoid the confusion, change the return type to 'bool'. No functional change intended.
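As a hedged illustration of the readability this buys (a minimal standalone sketch; the names below are invented for the example, not the actual KVM symbols): with an 'int' return, '1' looks like an error code at first glance, whereas 'bool' makes the "a job was queued" meaning explicit at the call site.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for kvm_arch_setup_async_pf(): true means
 * "an async page fault job was queued", false means it was not. */
static bool setup_async_job(bool must_wait)
{
	return must_wait;
}

int main(void)
{
	/* Reads naturally: if a job was queued, retry the fault later. */
	if (setup_async_job(true))
		printf("job queued, retry the fault later\n");
	else
		printf("handled synchronously\n");
	return 0;
}
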
Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20200615121334.91300-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3ca70554d5f1..a1850120ede0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4045,8 +4045,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) walk_shadow_page_lockless_end(vcpu); } -static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - gfn_t gfn) +static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + gfn_t gfn) { struct kvm_arch_async_pf arch; -- cgit v1.2.3 From f25a9dec2da3b303e4c62cf9fa67e836866198b2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 22 Jun 2020 14:58:30 -0700 Subject: KVM: x86/mmu: Drop kvm_arch_write_log_dirty() wrapper Drop kvm_arch_write_log_dirty() in favor of invoking .write_log_dirty() directly from FNAME(update_accessed_dirty_bits). "kvm_arch" is usually used for x86 functions that are invoked from generic KVM, and implies that there are external callers, neither of which is true. Remove the check for a non-NULL kvm_x86_ops hook, as the call is wrapped in a PTTYPE_EPT #ifdef and the hook is unconditionally set by VMX. Signed-off-by: Sean Christopherson Message-Id: <20200622215832.22090-3-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 1 - arch/x86/kvm/mmu/mmu.c | 15 --------------- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 3 files changed, 1 insertion(+), 17 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 444bb9c54548..81cafc937cfb 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -222,7 +222,6 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index a1850120ede0..03ce2cad04f7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1738,21 +1738,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); } -/** - * kvm_arch_write_log_dirty - emulate dirty page logging - * @vcpu: Guest mode vcpu - * - * Emulate arch specific page modification logging for the - * nested hypervisor - */ -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa) -{ - if (kvm_x86_ops.write_log_dirty) - return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa); - - return 0; -} - bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn) { diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index bd70ece1ef8b..6886be325e1d 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, !(pte & PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); #if PTTYPE == PTTYPE_EPT - if (kvm_arch_write_log_dirty(vcpu, addr)) + if (kvm_x86_ops.write_log_dirty(vcpu, addr)) return -EINVAL; #endif pte |= 
PT_GUEST_DIRTY_MASK; -- cgit v1.2.3 From 2f1d48aae2961b68922a653587f359c245569ccb Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 22 Jun 2020 14:58:31 -0700 Subject: KVM: nVMX: WARN if PML emulation helper is invoked outside of nested guest WARN if vmx_write_pml_buffer() is called outside of guest mode instead of silently ignoring the condition. The only caller is nested EPT's ept_update_accessed_dirty_bits(), which should only be reachable when L2 is active. Signed-off-by: Sean Christopherson Message-Id: <20200622215832.22090-4-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 13745f2a5ecd..2c9594898fbc 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7485,32 +7485,33 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t dst; - if (is_guest_mode(vcpu)) { - WARN_ON_ONCE(vmx->nested.pml_full); + if (WARN_ON_ONCE(!is_guest_mode(vcpu))) + return 0; - /* - * Check if PML is enabled for the nested guest. - * Whether eptp bit 6 is set is already checked - * as part of A/D emulation. - */ - vmcs12 = get_vmcs12(vcpu); - if (!nested_cpu_has_pml(vmcs12)) - return 0; + if (WARN_ON_ONCE(vmx->nested.pml_full)) + return 1; - if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { - vmx->nested.pml_full = true; - return 1; - } + /* + * Check if PML is enabled for the nested guest. Whether eptp bit 6 is + * set is already checked as part of A/D emulation. + */ + vmcs12 = get_vmcs12(vcpu); + if (!nested_cpu_has_pml(vmcs12)) + return 0; + + if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { + vmx->nested.pml_full = true; + return 1; + } - gpa &= ~0xFFFull; - dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; + gpa &= ~0xFFFull; + dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; - if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, - offset_in_page(dst), sizeof(gpa))) - return 0; + if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, + offset_in_page(dst), sizeof(gpa))) + return 0; - vmcs12->guest_pml_index--; - } + vmcs12->guest_pml_index--; return 0; } -- cgit v1.2.3 From 02f5fb2e69f653ef09490d081ef65296a0cbf114 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 22 Jun 2020 14:58:32 -0700 Subject: KVM: x86/mmu: Make .write_log_dirty a nested operation Move .write_log_dirty() into kvm_x86_nested_ops to help differentiate it from the non-nested dirty log hooks. And because it's a nested-only operation. 
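As a hedged illustration of the ops-table split (the structures and names below are invented for this standalone sketch, not the real KVM definitions): grouping nested-only callbacks behind a separate pointer makes the "nested only" contract visible at every call site.

#include <stdio.h>

/* Invented stand-ins for kvm_x86_ops / kvm_x86_nested_ops. */
struct nested_ops {
	int (*write_log_dirty)(unsigned long l2_gpa);
};

struct arch_ops {
	const struct nested_ops *nested_ops;
};

static int demo_write_pml_buffer(unsigned long l2_gpa)
{
	printf("logging nested write to gpa 0x%lx\n", l2_gpa);
	return 0;
}

static const struct nested_ops demo_nested = {
	.write_log_dirty = demo_write_pml_buffer,
};

static const struct arch_ops demo_ops = { .nested_ops = &demo_nested };

int main(void)
{
	/* Mirrors the kvm_x86_ops.nested_ops->write_log_dirty(...) call. */
	return demo_ops.nested_ops->write_log_dirty(0x1000);
}
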
Signed-off-by: Sean Christopherson Message-Id: <20200622215832.22090-5-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- arch/x86/kvm/vmx/nested.c | 38 ++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 38 -------------------------------------- 3 files changed, 39 insertions(+), 39 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 6886be325e1d..7e370d8bd576 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, !(pte & PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); #if PTTYPE == PTTYPE_EPT - if (kvm_x86_ops.write_log_dirty(vcpu, addr)) + if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr)) return -EINVAL; #endif pte |= PT_GUEST_DIRTY_MASK; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b26655104d4a..aeaac9febca4 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3205,6 +3205,43 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) return true; } +static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + struct vmcs12 *vmcs12; + struct vcpu_vmx *vmx = to_vmx(vcpu); + gpa_t dst; + + if (WARN_ON_ONCE(!is_guest_mode(vcpu))) + return 0; + + if (WARN_ON_ONCE(vmx->nested.pml_full)) + return 1; + + /* + * Check if PML is enabled for the nested guest. Whether eptp bit 6 is + * set is already checked as part of A/D emulation. + */ + vmcs12 = get_vmcs12(vcpu); + if (!nested_cpu_has_pml(vmcs12)) + return 0; + + if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { + vmx->nested.pml_full = true; + return 1; + } + + gpa &= ~0xFFFull; + dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; + + if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, + offset_in_page(dst), sizeof(gpa))) + return 0; + + vmcs12->guest_pml_index--; + + return 0; +} + /* * Intel's VMX Instruction Reference specifies a common set of prerequisites * for running VMX instructions (except VMXON, whose prerequisites are @@ -6503,6 +6540,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = { .get_state = vmx_get_nested_state, .set_state = vmx_set_nested_state, .get_vmcs12_pages = nested_get_vmcs12_pages, + .write_log_dirty = nested_vmx_write_pml_buffer, .enable_evmcs = nested_enable_evmcs, .get_evmcs_version = nested_get_evmcs_version, }; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 2c9594898fbc..8411118e51a2 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7479,43 +7479,6 @@ static void vmx_flush_log_dirty(struct kvm *kvm) kvm_flush_pml_buffers(kvm); } -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) -{ - struct vmcs12 *vmcs12; - struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t dst; - - if (WARN_ON_ONCE(!is_guest_mode(vcpu))) - return 0; - - if (WARN_ON_ONCE(vmx->nested.pml_full)) - return 1; - - /* - * Check if PML is enabled for the nested guest. Whether eptp bit 6 is - * set is already checked as part of A/D emulation. 
- */ - vmcs12 = get_vmcs12(vcpu); - if (!nested_cpu_has_pml(vmcs12)) - return 0; - - if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { - vmx->nested.pml_full = true; - return 1; - } - - gpa &= ~0xFFFull; - dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; - - if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, - offset_in_page(dst), sizeof(gpa))) - return 0; - - vmcs12->guest_pml_index--; - - return 0; -} - static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t offset, unsigned long mask) @@ -7944,7 +7907,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .slot_disable_log_dirty = vmx_slot_disable_log_dirty, .flush_log_dirty = vmx_flush_log_dirty, .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, - .write_log_dirty = vmx_write_pml_buffer, .pre_block = vmx_pre_block, .post_block = vmx_post_block, -- cgit v1.2.3 From 6abe9c1386e5c86f360e4e8fde8eec95eee77aa3 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 22 Jun 2020 18:04:41 -0400 Subject: KVM: X86: Move ignore_msrs handling up the stack MSR accesses can be one of: (1) KVM internal access, (2) userspace access (e.g., via KVM_SET_MSRS ioctl), (3) guest access. The ignore_msrs option was previously handled by kvm_get_msr_common() and kvm_set_msr_common(), which is the bottom of the msr access stack. This works in most cases; however, it can dump unwanted warning messages to dmesg even when KVM gets/sets MSRs internally, e.g. when kvm_cpuid() calls __kvm_set_msr() or __kvm_get_msr(). Ideally we only want to trap cases (2) or (3), but not (1) above. To achieve this, move the ignore_msrs handling up to the callers of __kvm_get_msr() and __kvm_set_msr(). To identify the "msr missing" event, a new return value (KVM_MSR_RET_INVALID==2) is introduced. Signed-off-by: Peter Xu Message-Id: <20200622220442.21998-2-peterx@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 80 ++++++++++++++++++++++++++++++++++++------------------ arch/x86/kvm/x86.h | 2 ++ 2 files changed, 56 insertions(+), 26 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 88c593f83b28..e7c1567b9501 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -243,6 +243,29 @@ static struct kmem_cache *x86_fpu_cache; static struct kmem_cache *x86_emulator_cache; +/* + * When called, it means the previous get/set msr reached an invalid msr. + * Return 0 if we want to ignore/silence this failed msr access, or 1 if we want + * to fail the caller. + */ +static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr, + u64 data, bool write) +{ + const char *op = write ? 
"wrmsr" : "rdmsr"; + + if (ignore_msrs) { + if (report_ignored_msrs) + vcpu_unimpl(vcpu, "ignored %s: 0x%x data 0x%llx\n", + op, msr, data); + /* Mask the error */ + return 0; + } else { + vcpu_debug_ratelimited(vcpu, "unhandled %s: 0x%x data 0x%llx\n", + op, msr, data); + return 1; + } +} + static struct kmem_cache *kvm_alloc_emulator_cache(void) { unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src); @@ -1516,6 +1539,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, return kvm_x86_ops.set_msr(vcpu, &msr); } +static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, + u32 index, u64 data, bool host_initiated) +{ + int ret = __kvm_set_msr(vcpu, index, data, host_initiated); + + if (ret == KVM_MSR_RET_INVALID) + ret = kvm_msr_ignored_check(vcpu, index, data, true); + + return ret; +} + /* * Read the MSR specified by @index into @data. Select MSR specific fault * checks are bypassed if @host_initiated is %true. @@ -1537,15 +1571,29 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, return ret; } +static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, + u32 index, u64 *data, bool host_initiated) +{ + int ret = __kvm_get_msr(vcpu, index, data, host_initiated); + + if (ret == KVM_MSR_RET_INVALID) { + /* Unconditionally clear *data for simplicity */ + *data = 0; + ret = kvm_msr_ignored_check(vcpu, index, 0, false); + } + + return ret; +} + int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) { - return __kvm_get_msr(vcpu, index, data, false); + return kvm_get_msr_ignored_check(vcpu, index, data, false); } EXPORT_SYMBOL_GPL(kvm_get_msr); int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) { - return __kvm_set_msr(vcpu, index, data, false); + return kvm_set_msr_ignored_check(vcpu, index, data, false); } EXPORT_SYMBOL_GPL(kvm_set_msr); @@ -1665,12 +1713,12 @@ EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff); */ static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { - return __kvm_get_msr(vcpu, index, data, true); + return kvm_get_msr_ignored_check(vcpu, index, data, true); } static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { - return __kvm_set_msr(vcpu, index, *data, true); + return kvm_set_msr_ignored_check(vcpu, index, *data, true); } #ifdef CONFIG_X86_64 @@ -3066,17 +3114,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return xen_hvm_config(vcpu, data); if (kvm_pmu_is_valid_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); - if (!ignore_msrs) { - vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n", - msr, data); - return 1; - } else { - if (report_ignored_msrs) - vcpu_unimpl(vcpu, - "ignored wrmsr: 0x%x data 0x%llx\n", - msr, data); - break; - } + return KVM_MSR_RET_INVALID; } return 0; } @@ -3331,17 +3369,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) default: if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) return kvm_pmu_get_msr(vcpu, msr_info); - if (!ignore_msrs) { - vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n", - msr_info->index); - return 1; - } else { - if (report_ignored_msrs) - vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", - msr_info->index); - msr_info->data = 0; - } - break; + return KVM_MSR_RET_INVALID; } return 0; } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 6eb62e97e59f..8d42dd0cf81e 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -366,4 +366,6 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu); u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu 
*vcpu); bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu); +#define KVM_MSR_RET_INVALID 2 + #endif -- cgit v1.2.3 From 12bc2132b15e0a969b3f455d90a5f215ef239eff Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 22 Jun 2020 18:04:42 -0400 Subject: KVM: X86: Do the same ignore_msrs check for feature msrs Logically, the ignore_msrs and report_ignored_msrs options should also apply to feature MSRs. Add the same handling there. Signed-off-by: Peter Xu Message-Id: <20200622220442.21998-3-peterx@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/x86.c | 10 ++++++++-- 3 files changed, 10 insertions(+), 4 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c0da4dd78ac5..70a824806274 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2359,7 +2359,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; break; default: - return 1; + return KVM_MSR_RET_INVALID; } return 0; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8411118e51a2..608e992e8db9 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1815,7 +1815,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr) msr->data = vmx_get_perf_capabilities(); return 0; default: - return 1; + return KVM_MSR_RET_INVALID; } } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e7c1567b9501..ec9aba133c1c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1412,8 +1412,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) rdmsrl_safe(msr->index, &msr->data); break; default: - if (kvm_x86_ops.get_msr_feature(msr)) - return 1; + return kvm_x86_ops.get_msr_feature(msr); } return 0; } @@ -1425,6 +1424,13 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) msr.index = index; r = kvm_get_msr_feature(&msr); + + if (r == KVM_MSR_RET_INVALID) { + /* Unconditionally clear the output for simplicity */ + *data = 0; + r = kvm_msr_ignored_check(vcpu, index, 0, false); + } + if (r) return r; -- cgit v1.2.3 From f5f6145e41d39c7fd04a17c3b2596c7abe933f10 Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Fri, 22 May 2020 18:19:51 -0400 Subject: KVM: x86: Move the check for upper 32 reserved bits of DR6 to a separate function Signed-off-by: Krish Sadhukhan Message-Id: <20200522221954.32131-2-krish.sadhukhan@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 +- arch/x86/kvm/x86.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ec9aba133c1c..82f457f0e7e0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1133,7 +1133,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) case 4: /* fall through */ case 6: - if (val & 0xffffffff00000000ULL) + if (!kvm_dr6_valid(val)) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); break; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 8d42dd0cf81e..31928bf18ba5 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -360,6 +360,11 @@ static inline bool kvm_dr7_valid(u64 data) /* Bits [63:32] are reserved */ return !(data >> 32); } +static inline bool kvm_dr6_valid(u64 data) +{ + /* Bits [63:32] are reserved */ + return !(data >> 32); +} void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu); void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 
1aef8161b38a531895a8bffad0e9fb1445ca91f7 Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Fri, 22 May 2020 18:19:52 -0400 Subject: KVM: nSVM: Check that DR6[63:32] and DR7[63:32] are not set on vmrun of nested guests According to section "Canonicalization and Consistency Checks" in APM vol. 2 the following guest state is illegal: "DR6[63:32] are not zero." "DR7[63:32] are not zero." "Any MBZ bit of EFER is set." Signed-off-by: Krish Sadhukhan Message-Id: <20200522221954.32131-3-krish.sadhukhan@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 6bceafb19108..e4ef980981af 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -231,6 +231,9 @@ static bool nested_vmcb_checks(struct vmcb *vmcb) (vmcb->save.cr0 & X86_CR0_NW)) return false; + if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7)) + return false; + return nested_vmcb_check_controls(&vmcb->control); } -- cgit v1.2.3 From 78824fabc72e5e37d51e6e567fde70a4fc41a6d7 Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Mon, 25 May 2020 23:22:06 -0700 Subject: KVM: SVM: fix sev_pin_memory()'s use of get_user_pages_fast() There are two problems in sev_pin_memory(): 1) The return value of get_user_pages_fast() is stored in an unsigned long, although the declared return value is of type int. This will not cause any symptoms, but it is misleading. Fix this by changing the type of npinned to "int". 2) The number of pages passed into get_user_pages_fast() is stored in an unsigned long, even though get_user_pages_fast() accepts an int. This means that it is possible to silently overflow the number of pages. Fix this by adding a WARN_ON_ONCE() and an early error return. The npages variable is left as an unsigned long for convenience in checking for overflow. Fixes: 89c505809052 ("KVM: SVM: Add support for KVM_SEV_LAUNCH_UPDATE_DATA command") Cc: Ingo Molnar Cc: Borislav Petkov Cc: Thomas Gleixner Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vitaly Kuznetsov Cc: Wanpeng Li Cc: Jim Mattson Cc: Joerg Roedel Cc: H. Peter Anvin Cc: x86@kernel.org Cc: kvm@vger.kernel.org Signed-off-by: John Hubbard Message-Id: <20200526062207.1360225-2-jhubbard@nvidia.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 5573a97f1520..ceeee4bb6150 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -313,7 +313,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, int write) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - unsigned long npages, npinned, size; + unsigned long npages, size; + int npinned; unsigned long locked, lock_limit; struct page **pages; unsigned long first, last; @@ -333,6 +334,9 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, return NULL; } + if (WARN_ON_ONCE(npages > INT_MAX)) + return NULL; + /* Avoid using vmalloc for smaller buffers. */ size = npages * sizeof(struct page *); if (size > PAGE_SIZE) -- cgit v1.2.3 From dc42c8ae0a7762378102dd043779d19331804cce Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Mon, 25 May 2020 23:22:07 -0700 Subject: KVM: SVM: convert get_user_pages() --> pin_user_pages() This code was using get_user_pages*(), in a "Case 2" scenario (DMA/RDMA), using the categorization from [1]. 
That means that it's time to convert the get_user_pages*() + put_page() calls to pin_user_pages*() + unpin_user_pages() calls. There is some helpful background in [2]: basically, this is a small part of fixing a long-standing disconnect between pinning pages and file systems' use of those pages. [1] Documentation/core-api/pin_user_pages.rst [2] "Explicit pinning of user-space pages": https://lwn.net/Articles/807108/ Cc: Ingo Molnar Cc: Borislav Petkov Cc: Thomas Gleixner Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vitaly Kuznetsov Cc: Wanpeng Li Cc: Jim Mattson Cc: Joerg Roedel Cc: H. Peter Anvin Cc: x86@kernel.org Cc: kvm@vger.kernel.org Signed-off-by: John Hubbard Message-Id: <20200526062207.1360225-3-jhubbard@nvidia.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index ceeee4bb6150..a893624b9275 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -348,7 +348,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, return NULL; /* Pin the user virtual address. */ - npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); + npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); if (npinned != npages) { pr_err("SEV: Failure locking %lu pages.\n", npages); goto err; @@ -361,7 +361,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, err: if (npinned > 0) - release_pages(pages, npinned); + unpin_user_pages(pages, npinned); kvfree(pages); return NULL; @@ -372,7 +372,7 @@ static void sev_unpin_memory(struct kvm *kvm, struct page **pages, { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - release_pages(pages, npages); + unpin_user_pages(pages, npages); kvfree(pages); sev->pages_locked -= npages; } -- cgit v1.2.3 From a8d908b5873cad212b0f74569f5a23b804e694ce Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 23 Jun 2020 05:12:24 -0400 Subject: KVM: x86: report sev_pin_memory errors with PTR_ERR Callers of sev_pin_memory() treat NULL differently: sev_launch_secret()/svm_register_enc_region() return -ENOMEM; sev_dbg_crypt() returns -EFAULT. Switching to ERR_PTR() preserves the error and enables cleaner reporting of different kinds of failures. Suggested-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index a893624b9275..2b4916ffa906 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -320,7 +320,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, unsigned long first, last; if (ulen == 0 || uaddr + ulen < uaddr) - return NULL; + return ERR_PTR(-EINVAL); /* Calculate number of pages. */ first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; @@ -331,11 +331,11 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit); - return NULL; + return ERR_PTR(-ENOMEM); } if (WARN_ON_ONCE(npages > INT_MAX)) - return NULL; + return ERR_PTR(-EINVAL); /* Avoid using vmalloc for smaller buffers. 
*/ size = npages * sizeof(struct page *); @@ -345,7 +345,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, pages = kmalloc(size, GFP_KERNEL_ACCOUNT); if (!pages) - return NULL; + return ERR_PTR(-ENOMEM); /* Pin the user virtual address. */ npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); @@ -360,11 +360,13 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, return pages; err: - if (npinned > 0) + if (npinned > 0) { unpin_user_pages(pages, npinned); + npinned = -ENOMEM; + } kvfree(pages); - return NULL; + return ERR_PTR(npinned); } static void sev_unpin_memory(struct kvm *kvm, struct page **pages, @@ -864,8 +866,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) return -EFAULT; pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); - if (!pages) - return -ENOMEM; + if (IS_ERR(pages)) + return PTR_ERR(pages); /* * The secret must be copied into contiguous memory region, lets verify @@ -991,8 +993,8 @@ int svm_register_enc_region(struct kvm *kvm, return -ENOMEM; region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); - if (!region->pages) { - ret = -ENOMEM; + if (IS_ERR(region->pages)) { + ret = PTR_ERR(region->pages); goto e_free; } -- cgit v1.2.3 From 73cd6e5f7f0b8c07330c48bfec4aaa84a6921cf4 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jun 2020 16:56:18 -0700 Subject: kvm: svm: Prefer vcpu->cpu to raw_smp_processor_id() The current logical processor id is cached in vcpu->cpu. Use it instead of raw_smp_processor_id() when a kvm_vcpu struct is available. Signed-off-by: Jim Mattson Reviewed-by: Oliver Upton Message-Id: <20200603235623.245638-2-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 70a824806274..fd4a9188902c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2992,21 +2992,18 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) static void reload_tss(struct kvm_vcpu *vcpu) { - int cpu = raw_smp_processor_id(); + struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); - struct svm_cpu_data *sd = per_cpu(svm_data, cpu); sd->tss_desc->type = 9; /* available 32/64-bit TSS */ load_TR_desc(); } static void pre_svm_run(struct vcpu_svm *svm) { - int cpu = raw_smp_processor_id(); - - struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu); if (sev_guest(svm->vcpu.kvm)) - return pre_sev_run(svm, cpu); + return pre_sev_run(svm, svm->vcpu.cpu); /* FIXME: handle wraparound of asid_generation */ if (svm->asid_generation != sd->asid_generation) -- cgit v1.2.3 From 242636343c246e338b8ea317e32dbf4ed47edc65 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jun 2020 16:56:19 -0700 Subject: kvm: svm: Always set svm->last_cpu on VMRUN Previously, this field was only set when using SEV. Set it for all vCPU configurations, so that it can be communicated to userspace for diagnosing potential hardware errors. 
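A minimal standalone sketch of the pattern (all names below are invented for the example, not the KVM ones): record the executing CPU unconditionally on every entry, so that later consumers do not depend on one feature's code path having run.

#include <stdio.h>

struct vcpu { int cpu; int last_cpu; };

static void vcpu_run_once(struct vcpu *v)
{
	/* Unconditional bookkeeping, analogous to setting
	 * svm->last_cpu right before __svm_vcpu_run(). */
	v->last_cpu = v->cpu;
	/* ... the guest would be entered here ... */
}

int main(void)
{
	struct vcpu v = { .cpu = 3, .last_cpu = -1 };
	vcpu_run_once(&v);
	printf("vCPU last ran on CPU %d\n", v.last_cpu);
	return 0;
}
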
Signed-off-by: Jim Mattson Reviewed-by: Oliver Upton Reviewed-by: Peter Shier Message-Id: <20200603235623.245638-3-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 1 - arch/x86/kvm/svm/svm.c | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2b4916ffa906..a8444c74430e 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1189,7 +1189,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) svm->last_cpu == cpu) return; - svm->last_cpu = cpu; sd->sev_vmcbs[asid] = svm->vmcb; svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; mark_dirty(svm->vmcb, VMCB_ASID); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index fd4a9188902c..24b7f321874f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3396,6 +3396,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) */ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); + svm->last_cpu = vcpu->cpu; __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); #ifdef CONFIG_X86_64 -- cgit v1.2.3 From 80a1684c0161088710398d84a7cdd683c5d88228 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jun 2020 16:56:20 -0700 Subject: kvm: vmx: Add last_cpu to struct vcpu_vmx As we already do in svm, record the last logical processor on which a vCPU has run, so that it can be communicated to userspace for potential hardware errors. Signed-off-by: Jim Mattson Reviewed-by: Oliver Upton Reviewed-by: Peter Shier Message-Id: <20200603235623.245638-4-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/vmx/vmx.h | 3 +++ 2 files changed, 4 insertions(+) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 608e992e8db9..4d8f12c0a5c6 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6734,6 +6734,7 @@ reenter_guest: if (vcpu->arch.cr2 != read_cr2()) write_cr2(vcpu->arch.cr2); + vmx->last_cpu = vcpu->cpu; vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, vmx->loaded_vmcs->launched); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 639798e4a6ca..f8f9e214d285 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -300,6 +300,9 @@ struct vcpu_vmx { u64 ept_pointer; struct pt_desc pt_desc; + + /* which host CPU was used for running this vcpu */ + unsigned int last_cpu; }; enum ept_pointers_status { -- cgit v1.2.3 From 1aa561b1a4c0ae2a9a9b9c21a84b5ca66b4775d8 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jun 2020 16:56:21 -0700 Subject: kvm: x86: Add "last CPU" to some KVM_EXIT information More often than not, a failed VM-entry in an x86 production environment is induced by a defective CPU. To help identify the bad hardware, include the id of the last logical CPU to run a vCPU in the information provided to userspace on a KVM exit for failed VM-entry or for KVM internal errors not associated with emulation. The presence of this additional information is indicated by a new capability, KVM_CAP_LAST_CPU. 
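For a hedged sketch of the userspace side, here is a minimal standalone probe program; it only assumes access to /dev/kvm and kernel headers new enough to define KVM_CAP_LAST_CPU:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/* 1 means the extra "last CPU" fields described above are
	 * populated on failed VM-entry and internal-error exits. */
	printf("KVM_CAP_LAST_CPU: %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_LAST_CPU));
	return 0;
}

With the capability present, a VMM that sees exit_reason == KVM_EXIT_FAIL_ENTRY can log run->fail_entry.cpu alongside hardware_entry_failure_reason to correlate repeated failures with one physical CPU.
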
Signed-off-by: Jim Mattson Reviewed-by: Oliver Upton Reviewed-by: Peter Shier Message-Id: <20200603235623.245638-5-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 4 +++- arch/x86/kvm/vmx/vmx.c | 10 ++++++++-- arch/x86/kvm/x86.c | 1 + 3 files changed, 12 insertions(+), 3 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 24b7f321874f..8ecd46f2cb1e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2947,6 +2947,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->fail_entry.hardware_entry_failure_reason = svm->vmcb->control.exit_code; + kvm_run->fail_entry.cpu = svm->last_cpu; dump_vmcb(vcpu); return 0; } @@ -2970,8 +2971,9 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; - vcpu->run->internal.ndata = 1; + vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = exit_code; + vcpu->run->internal.data[1] = svm->last_cpu; return 0; } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4d8f12c0a5c6..b52bcebfa094 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4781,10 +4781,11 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; - vcpu->run->internal.ndata = 3; + vcpu->run->internal.ndata = 4; vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; vcpu->run->internal.data[2] = error_code; + vcpu->run->internal.data[3] = vmx->last_cpu; return 0; } @@ -6006,6 +6007,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason; + vcpu->run->fail_entry.cpu = vmx->last_cpu; return 0; } @@ -6014,6 +6016,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = vmcs_read32(VM_INSTRUCTION_ERROR); + vcpu->run->fail_entry.cpu = vmx->last_cpu; return 0; } @@ -6040,6 +6043,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->internal.data[3] = vmcs_read64(GUEST_PHYSICAL_ADDRESS); } + vcpu->run->internal.data[vcpu->run->internal.ndata++] = + vmx->last_cpu; return 0; } @@ -6095,8 +6100,9 @@ unexpected_vmexit: vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; - vcpu->run->internal.ndata = 1; + vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = exit_reason; + vcpu->run->internal.data[1] = vmx->last_cpu; return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 82f457f0e7e0..1a0fad1018f9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3510,6 +3510,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MSR_PLATFORM_INFO: case KVM_CAP_EXCEPTION_PAYLOAD: case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_LAST_CPU: r = 1; break; case KVM_CAP_SYNC_REGS: -- cgit v1.2.3 From 8a14fe4f0c54f27c89389d13c4a1e467a88c35ea Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jun 2020 16:56:22 -0700 Subject: kvm: x86: Move last_cpu into 
kvm_vcpu_arch as last_vmentry_cpu Both the vcpu_vmx structure and the vcpu_svm structure have a 'last_cpu' field. Move the common field into the kvm_vcpu_arch structure. For clarity, rename it to 'last_vmentry_cpu.' Suggested-by: Sean Christopherson Signed-off-by: Jim Mattson Reviewed-by: Oliver Upton Reviewed-by: Peter Shier Message-Id: <20200603235623.245638-6-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 2 +- arch/x86/kvm/svm/svm.c | 6 +++--- arch/x86/kvm/svm/svm.h | 3 --- arch/x86/kvm/vmx/vmx.c | 12 ++++++------ arch/x86/kvm/vmx/vmx.h | 3 --- 5 files changed, 10 insertions(+), 16 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index a8444c74430e..e09aef93e92b 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1186,7 +1186,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) * 2) or this VMCB was executed on different host CPU in previous VMRUNs. */ if (sd->sev_vmcbs[asid] == svm->vmcb && - svm->last_cpu == cpu) + svm->vcpu.arch.last_vmentry_cpu == cpu) return; sd->sev_vmcbs[asid] = svm->vmcb; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8ecd46f2cb1e..c55ebf76ec6d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2947,7 +2947,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->fail_entry.hardware_entry_failure_reason = svm->vmcb->control.exit_code; - kvm_run->fail_entry.cpu = svm->last_cpu; + kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; dump_vmcb(vcpu); return 0; } @@ -2973,7 +2973,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = exit_code; - vcpu->run->internal.data[1] = svm->last_cpu; + vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; return 0; } @@ -3398,7 +3398,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) */ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); - svm->last_cpu = vcpu->cpu; + vcpu->arch.last_vmentry_cpu = vcpu->cpu; __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); #ifdef CONFIG_X86_64 diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 6ac4c00a5d82..613356f85da6 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -158,9 +158,6 @@ struct vcpu_svm { */ struct list_head ir_list; spinlock_t ir_list_lock; - - /* which host CPU was used for running this vcpu */ - unsigned int last_cpu; }; struct svm_cpu_data { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b52bcebfa094..d9ee31b0679b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4785,7 +4785,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; vcpu->run->internal.data[2] = error_code; - vcpu->run->internal.data[3] = vmx->last_cpu; + vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu; return 0; } @@ -6007,7 +6007,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason; - vcpu->run->fail_entry.cpu = vmx->last_cpu; + vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; return 0; } @@ -6016,7 +6016,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = 
KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = vmcs_read32(VM_INSTRUCTION_ERROR); - vcpu->run->fail_entry.cpu = vmx->last_cpu; + vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; return 0; } @@ -6044,7 +6044,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vmcs_read64(GUEST_PHYSICAL_ADDRESS); } vcpu->run->internal.data[vcpu->run->internal.ndata++] = - vmx->last_cpu; + vcpu->arch.last_vmentry_cpu; return 0; } @@ -6102,7 +6102,7 @@ unexpected_vmexit: KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = exit_reason; - vcpu->run->internal.data[1] = vmx->last_cpu; + vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; return 0; } @@ -6740,7 +6740,7 @@ reenter_guest: if (vcpu->arch.cr2 != read_cr2()) write_cr2(vcpu->arch.cr2); - vmx->last_cpu = vcpu->cpu; + vcpu->arch.last_vmentry_cpu = vcpu->cpu; vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, vmx->loaded_vmcs->launched); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index f8f9e214d285..639798e4a6ca 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -300,9 +300,6 @@ struct vcpu_vmx { u64 ept_pointer; struct pt_desc pt_desc; - - /* which host CPU was used for running this vcpu */ - unsigned int last_cpu; }; enum ept_pointers_status { -- cgit v1.2.3 From c967118ddb21191178c0e0080fdc41f5d85ca1d1 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jun 2020 16:56:23 -0700 Subject: kvm: x86: Set last_vmentry_cpu in vcpu_enter_guest Since this field is now in kvm_vcpu_arch, clean things up a little by setting it in vendor-agnostic code: vcpu_enter_guest. Note that it must be set after the call to kvm_x86_ops.run(), since it can't be updated before pre_sev_run(). 
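A hedged toy model of the ordering constraint (invented names, standalone): pre_sev_run()-style code decides whether to flush by comparing the cached CPU with the current one, so the cache may only be refreshed after that comparison has run.

#include <stdbool.h>
#include <stdio.h>

static int last_vmentry_cpu = -1;

/* Analogous to pre_sev_run(): flush only when the vCPU migrated. */
static bool need_flush(int cpu)
{
	return last_vmentry_cpu != cpu;
}

static void enter_guest(int cpu)
{
	if (need_flush(cpu))
		printf("cpu %d: flush ASID/TLB state\n", cpu);
	/* ... the guest runs here ... */
	last_vmentry_cpu = cpu;	/* refresh only AFTER the check used it */
}

int main(void)
{
	enter_guest(0);	/* flush: first run */
	enter_guest(0);	/* no flush: same CPU */
	enter_guest(1);	/* flush: migrated */
	return 0;
}
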
Suggested-by: Sean Christopherson Signed-off-by: Jim Mattson Reviewed-by: Oliver Upton Reviewed-by: Peter Shier Message-Id: <20200603235623.245638-7-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 1 - arch/x86/kvm/vmx/vmx.c | 1 - arch/x86/kvm/x86.c | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c55ebf76ec6d..38104f47cd25 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3398,7 +3398,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) */ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); - vcpu->arch.last_vmentry_cpu = vcpu->cpu; __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); #ifdef CONFIG_X86_64 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d9ee31b0679b..1de5dac952b6 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6740,7 +6740,6 @@ reenter_guest: if (vcpu->arch.cr2 != read_cr2()) write_cr2(vcpu->arch.cr2); - vcpu->arch.last_vmentry_cpu = vcpu->cpu; vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, vmx->loaded_vmcs->launched); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1a0fad1018f9..bd8690ca7b6b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8583,6 +8583,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (hw_breakpoint_active()) hw_breakpoint_restore(); + vcpu->arch.last_vmentry_cpu = vcpu->cpu; vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); vcpu->mode = OUTSIDE_GUEST_MODE; -- cgit v1.2.3 From b2656e4d8b29a25ea26ae92d14694904274e63b0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 8 Jun 2020 18:56:07 -0700 Subject: KVM: nVMX: Wrap VM-Fail valid path in generic VM-Fail helper Add nested_vmx_fail() to wrap VM-Fail paths that _may_ result in VM-Fail Valid to make it clear at the call sites that the Valid flavor isn't guaranteed. Suggested-by: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Message-Id: <20200609015607.6994-1-sean.j.christopherson@intel.com> Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 77 ++++++++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 41 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index aeaac9febca4..7693d41a2446 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -171,15 +171,6 @@ static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) static int nested_vmx_failValid(struct kvm_vcpu *vcpu, u32 vm_instruction_error) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * failValid writes the error number to the current VMCS, which - * can't be done if there isn't a current VMCS. - */ - if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) - return nested_vmx_failInvalid(vcpu); - vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_SF | X86_EFLAGS_OF)) @@ -192,6 +183,20 @@ static int nested_vmx_failValid(struct kvm_vcpu *vcpu, return kvm_skip_emulated_instruction(vcpu); } +static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * failValid writes the error number to the current VMCS, which + * can't be done if there isn't a current VMCS. 
+ */ + if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) + return nested_vmx_failInvalid(vcpu); + + return nested_vmx_failValid(vcpu, vm_instruction_error); +} + static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) { /* TODO: not to reset guest simply here. */ @@ -3493,19 +3498,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * when using the merged vmcs02. */ if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) - return nested_vmx_failValid(vcpu, - VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); + return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); if (vmcs12->launch_state == launch) - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS : VMXERR_VMRESUME_NONLAUNCHED_VMCS); if (nested_vmx_check_controls(vcpu, vmcs12)) - return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); if (nested_vmx_check_host_state(vcpu, vmcs12)) - return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); + return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); /* * We're finally done with prerequisite checking, and can start with @@ -3554,7 +3558,7 @@ vmentry_failed: if (status == NVMX_VMENTRY_VMEXIT) return 1; WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); - return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); } /* @@ -4497,7 +4501,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, * flag and the VM-instruction error field of the VMCS * accordingly, and skip the emulated instruction. */ - (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); /* * Restore L1's host state to KVM's software model. 
We're here @@ -4797,8 +4801,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) } if (vmx->nested.vmxon) - return nested_vmx_failValid(vcpu, - VMXERR_VMXON_IN_VMX_ROOT_OPERATION); + return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { @@ -4889,12 +4892,10 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) return r; if (!page_address_valid(vcpu, vmptr)) - return nested_vmx_failValid(vcpu, - VMXERR_VMCLEAR_INVALID_ADDRESS); + return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); if (vmptr == vmx->nested.vmxon_ptr) - return nested_vmx_failValid(vcpu, - VMXERR_VMCLEAR_VMXON_POINTER); + return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); /* * When Enlightened VMEntry is enabled on the calling CPU we treat @@ -4964,8 +4965,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) offset = vmcs_field_to_offset(field); if (offset < 0) - return nested_vmx_failValid(vcpu, - VMXERR_UNSUPPORTED_VMCS_COMPONENT); + return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); @@ -5068,8 +5068,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) offset = vmcs_field_to_offset(field); if (offset < 0) - return nested_vmx_failValid(vcpu, - VMXERR_UNSUPPORTED_VMCS_COMPONENT); + return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); /* * If the vCPU supports "VMWRITE to any supported field in the @@ -5077,8 +5076,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) */ if (vmcs_field_readonly(field) && !nested_cpu_has_vmwrite_any_field(vcpu)) - return nested_vmx_failValid(vcpu, - VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); + return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); /* * Ensure vmcs12 is up-to-date before any VMWRITE that dirties @@ -5153,12 +5151,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) return r; if (!page_address_valid(vcpu, vmptr)) - return nested_vmx_failValid(vcpu, - VMXERR_VMPTRLD_INVALID_ADDRESS); + return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); if (vmptr == vmx->nested.vmxon_ptr) - return nested_vmx_failValid(vcpu, - VMXERR_VMPTRLD_VMXON_POINTER); + return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); /* Forbid normal VMPTRLD if Enlightened version was used */ if (vmx->nested.hv_evmcs) @@ -5175,7 +5171,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) * given physical address won't match the required * VMCS12_REVISION identifier. 
*/ - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); } @@ -5185,7 +5181,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) (new_vmcs12->hdr.shadow_vmcs && !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { kvm_vcpu_unmap(vcpu, &map, false); - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); } @@ -5270,8 +5266,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; if (type >= 32 || !(types & (1 << type))) - return nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); + return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) @@ -5292,7 +5287,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) switch (type) { case VMX_EPT_EXTENT_CONTEXT: if (!nested_vmx_check_eptp(vcpu, operand.eptp)) - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); roots_to_free = 0; @@ -5352,7 +5347,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; if (type >= 32 || !(types & (1 << type))) - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); /* according to the intel vmx instruction reference, the memory @@ -5366,7 +5361,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) return vmx_handle_memory_failure(vcpu, r, &e); if (operand.vpid >> 16) - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); vpid02 = nested_get_vpid02(vcpu); @@ -5374,14 +5369,14 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: if (!operand.vpid || is_noncanonical_address(operand.gla, vcpu)) - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); vpid_sync_vcpu_addr(vpid02, operand.gla); break; case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: if (!operand.vpid) - return nested_vmx_failValid(vcpu, + return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); vpid_sync_context(vpid02); break; -- cgit v1.2.3 From 7693b3eb537947ecffe302732f98e6f561befb70 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 25 Jun 2020 10:03:22 +0200 Subject: KVM: SVM: Rename struct nested_state to svm_nested_state Renaming is only needed in the svm.h header file. No functional changes. 
Signed-off-by: Joerg Roedel Message-Id: <20200625080325.28439-2-joro@8bytes.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 613356f85da6..e4251c5f80eb 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -81,7 +81,7 @@ struct kvm_svm { struct kvm_vcpu; -struct nested_state { +struct svm_nested_state { struct vmcb *hsave; u64 hsave_msr; u64 vm_cr_msr; @@ -133,7 +133,7 @@ struct vcpu_svm { ulong nmi_iret_rip; - struct nested_state nested; + struct svm_nested_state nested; bool nmi_singlestep; u64 nmi_singlestep_guest_rflags; -- cgit v1.2.3 From 06e7852c0ffb30bb7cac1686db2f5d6458039b44 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 25 Jun 2020 10:03:23 +0200 Subject: KVM: SVM: Add vmcb_ prefix to mark_*() functions Make it more clear what data structure these functions operate on. No functional changes. Signed-off-by: Joerg Roedel Message-Id: <20200625080325.28439-3-joro@8bytes.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/avic.c | 2 +- arch/x86/kvm/svm/nested.c | 6 +++--- arch/x86/kvm/svm/sev.c | 2 +- arch/x86/kvm/svm/svm.c | 44 ++++++++++++++++++++++---------------------- arch/x86/kvm/svm/svm.h | 8 ++++---- 5 files changed, 31 insertions(+), 31 deletions(-) (limited to 'arch/x86/kvm') diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index e80daa98682f..ac830cd50830 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -665,7 +665,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) } else { vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK; } - mark_dirty(vmcb, VMCB_AVIC); + vmcb_mark_dirty(vmcb, VMCB_AVIC); svm_set_pi_irte_mode(vcpu, activated); } diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index e4ef980981af..426a7ec2525f 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -106,7 +106,7 @@ void recalc_intercepts(struct vcpu_svm *svm) { struct vmcb_control_area *c, *h, *g; - mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); if (!is_guest_mode(&svm->vcpu)) return; @@ -378,7 +378,7 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm) */ recalc_intercepts(svm); - mark_all_dirty(svm->vmcb); + vmcb_mark_all_dirty(svm->vmcb); } void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, @@ -601,7 +601,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) svm->vmcb->save.cpl = 0; svm->vmcb->control.exit_int_info = 0; - mark_all_dirty(svm->vmcb); + vmcb_mark_all_dirty(svm->vmcb); trace_kvm_nested_vmexit_inject(nested_vmcb->control.exit_code, nested_vmcb->control.exit_info_1, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index e09aef93e92b..f7f1f4ecf08e 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1191,5 +1191,5 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) sd->sev_vmcbs[asid] = svm->vmcb; svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; - mark_dirty(svm->vmcb, VMCB_ASID); + vmcb_mark_dirty(svm->vmcb, VMCB_ASID); } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 38104f47cd25..9eaa3247dcbe 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -282,7 +282,7 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) } svm->vmcb->save.efer = efer | EFER_SVME; - mark_dirty(svm->vmcb, VMCB_CR); + vmcb_mark_dirty(svm->vmcb, VMCB_CR); } static int is_external_interrupt(u32 info) @@ -713,7 +713,7 @@ static void 
grow_ple_window(struct kvm_vcpu *vcpu) pause_filter_count_max); if (control->pause_filter_count != old) { - mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); trace_kvm_ple_window_update(vcpu->vcpu_id, control->pause_filter_count, old); } @@ -731,7 +731,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu) pause_filter_count_shrink, pause_filter_count); if (control->pause_filter_count != old) { - mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); trace_kvm_ple_window_update(vcpu->vcpu_id, control->pause_filter_count, old); } @@ -966,7 +966,7 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) svm->vmcb->control.tsc_offset = offset + g_tsc_offset; - mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); return svm->vmcb->control.tsc_offset; } @@ -1123,7 +1123,7 @@ static void init_vmcb(struct vcpu_svm *svm) clr_exception_intercept(svm, UD_VECTOR); } - mark_all_dirty(svm->vmcb); + vmcb_mark_all_dirty(svm->vmcb); enable_gif(svm); @@ -1257,7 +1257,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (unlikely(cpu != vcpu->cpu)) { svm->asid_generation = 0; - mark_all_dirty(svm->vmcb); + vmcb_mark_all_dirty(svm->vmcb); } #ifdef CONFIG_X86_64 @@ -1367,7 +1367,7 @@ static void svm_set_vintr(struct vcpu_svm *svm) control->int_ctl &= ~V_INTR_PRIO_MASK; control->int_ctl |= V_IRQ_MASK | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); - mark_dirty(svm->vmcb, VMCB_INTR); + vmcb_mark_dirty(svm->vmcb, VMCB_INTR); } static void svm_clear_vintr(struct vcpu_svm *svm) @@ -1385,7 +1385,7 @@ static void svm_clear_vintr(struct vcpu_svm *svm) svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask; } - mark_dirty(svm->vmcb, VMCB_INTR); + vmcb_mark_dirty(svm->vmcb, VMCB_INTR); } static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) @@ -1503,7 +1503,7 @@ static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) svm->vmcb->save.idtr.limit = dt->size; svm->vmcb->save.idtr.base = dt->address ; - mark_dirty(svm->vmcb, VMCB_DT); + vmcb_mark_dirty(svm->vmcb, VMCB_DT); } static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) @@ -1520,7 +1520,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) svm->vmcb->save.gdtr.limit = dt->size; svm->vmcb->save.gdtr.base = dt->address ; - mark_dirty(svm->vmcb, VMCB_DT); + vmcb_mark_dirty(svm->vmcb, VMCB_DT); } static void update_cr0_intercept(struct vcpu_svm *svm) @@ -1531,7 +1531,7