author     Linus Torvalds <torvalds@linux-foundation.org>    2018-12-26 11:46:28 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-12-26 11:46:28 -0800
commit     42b00f122cfbfed79fc29b0b3610f3abbb1e3864 (patch)
tree       c110a18c03e9ada45b6f3593843f0a06b36773dc /arch/x86/kvm
parent     460023a5d1d2aa0f733b6708b2fae5ea9f9dfec0 (diff)
parent     a0aea130afebcd091d5396d13f25b9da24c9144a (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "ARM:
   - selftests improvements
   - large PUD support for HugeTLB
   - single-stepping fixes
   - improved tracing
   - various timer and vGIC fixes

  x86:
   - Processor Tracing virtualization
   - STIBP support
   - some correctness fixes
   - refactorings and splitting of vmx.c
   - use the Hyper-V range TLB flush hypercall
   - reduce order of vcpu struct
   - WBNOINVD support
   - do not use -ftrace for __noclone functions
   - nested guest support for PAUSE filtering on AMD
   - more Hyper-V enlightenments (direct mode for synthetic timers)

  PPC:
   - nested VFIO

  s390:
   - bugfixes only this time"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (171 commits)
  KVM: x86: Add CPUID support for new instruction WBNOINVD
  kvm: selftests: ucall: fix exit mmio address guessing
  Revert "compiler-gcc: disable -ftracer for __noclone functions"
  KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines
  KVM: VMX: Explicitly reference RCX as the vmx_vcpu pointer in asm blobs
  KVM: x86: Use jmp to invoke kvm_spurious_fault() from .fixup
  MAINTAINERS: Add arch/x86/kvm sub-directories to existing KVM/x86 entry
  KVM/x86: Use SVM assembly instruction mnemonics instead of .byte streams
  KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range()
  KVM/MMU: Flush tlb directly in kvm_set_pte_rmapp()
  KVM/MMU: Move tlb flush in kvm_set_pte_rmapp() to kvm_mmu_notifier_change_pte()
  KVM: Make kvm_set_spte_hva() return int
  KVM: Replace old tlb flush function with new one to flush a specified range.
  KVM/MMU: Add tlb flush with range helper function
  KVM/VMX: Add hv tlb range flush support
  x86/hyper-v: Add HvFlushGuestAddressList hypercall support
  KVM: Add tlb_remote_flush_with_range callback in kvm_x86_ops
  KVM: x86: Disable Intel PT when VMXON in L1 guest
  KVM: x86: Set intercept for Intel PT MSRs read/write
  KVM: x86: Implement Intel PT MSRs read/write emulation
  ...
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/Makefile                                                                  |     2
-rw-r--r--  arch/x86/kvm/cpuid.c                                                                   |    31
-rw-r--r--  arch/x86/kvm/hyperv.c                                                                  |   305
-rw-r--r--  arch/x86/kvm/hyperv.h                                                                  |     4
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h                                                          |     2
-rw-r--r--  arch/x86/kvm/lapic.c                                                                   |     5
-rw-r--r--  arch/x86/kvm/mmu.c                                                                     |    98
-rw-r--r--  arch/x86/kvm/paging_tmpl.h                                                             |     3
-rw-r--r--  arch/x86/kvm/svm.c                                                                     |    68
-rw-r--r--  arch/x86/kvm/trace.h                                                                   |    10
-rw-r--r--  arch/x86/kvm/vmx.c                                                                     | 15252
-rw-r--r--  arch/x86/kvm/vmx/capabilities.h                                                        |   343
-rw-r--r--  arch/x86/kvm/vmx/evmcs.c (renamed from arch/x86/kvm/vmx_evmcs.h)                       |    78
-rw-r--r--  arch/x86/kvm/vmx/evmcs.h                                                               |   202
-rw-r--r--  arch/x86/kvm/vmx/nested.c                                                              |  5721
-rw-r--r--  arch/x86/kvm/vmx/nested.h                                                              |   282
-rw-r--r--  arch/x86/kvm/vmx/ops.h                                                                 |   285
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c (renamed from arch/x86/kvm/pmu_intel.c)                   |     0
-rw-r--r--  arch/x86/kvm/vmx/vmcs.h                                                                |   136
-rw-r--r--  arch/x86/kvm/vmx/vmcs12.c                                                              |   157
-rw-r--r--  arch/x86/kvm/vmx/vmcs12.h                                                              |   462
-rw-r--r--  arch/x86/kvm/vmx/vmcs_shadow_fields.h (renamed from arch/x86/kvm/vmx_shadow_fields.h)  |     0
-rw-r--r--  arch/x86/kvm/vmx/vmenter.S                                                             |    57
-rw-r--r--  arch/x86/kvm/vmx/vmx.c                                                                 |  7935
-rw-r--r--  arch/x86/kvm/vmx/vmx.h                                                                 |   519
-rw-r--r--  arch/x86/kvm/x86.c                                                                     |   161
26 files changed, 16669 insertions, 15449 deletions
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index dc4f2fdf5e57..69b3a7c30013 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -16,7 +16,7 @@ kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o page_track.o debugfs.o
-kvm-intel-y += vmx.o pmu_intel.o
+kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o
kvm-amd-y += svm.o pmu_amd.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7bcfa61375c0..bbffa6c54697 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -67,9 +67,6 @@ u64 kvm_supported_xcr0(void)
#define F(x) bit(X86_FEATURE_##x)
-/* For scattered features from cpufeatures.h; we currently expose none */
-#define KF(x) bit(KVM_CPUID_BIT_##x)
-
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -337,6 +334,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
+ unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -380,8 +378,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
- F(AMD_SSB_NO);
+ F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+ F(AMD_SSB_NO) | F(AMD_STIBP);
/* cpuid 0xC0000001.edx */
const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -395,7 +393,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
- F(SHA_NI) | F(AVX512BW) | F(AVX512VL);
+ F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;
/* cpuid 0xD.1.eax */
const u32 kvm_cpuid_D_1_eax_x86_features =
@@ -411,7 +409,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -426,7 +424,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
switch (function) {
case 0:
- entry->eax = min(entry->eax, (u32)0xd);
+ entry->eax = min(entry->eax, (u32)(f_intel_pt ? 0x14 : 0xd));
break;
case 1:
entry->edx &= kvm_cpuid_1_edx_x86_features;
@@ -603,6 +601,23 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
+ /* Intel PT */
+ case 0x14: {
+ int t, times = entry->eax;
+
+ if (!f_intel_pt)
+ break;
+
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ for (t = 1; t <= times; ++t) {
+ if (*nent >= maxnent)
+ goto out;
+ do_cpuid_1_ent(&entry[t], function, t);
+ entry[t].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ ++*nent;
+ }
+ break;
+ }
case KVM_CPUID_SIGNATURE: {
static const char signature[12] = "KVMKVMKVM\0\0";
const u32 *sigptr = (const u32 *)signature;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 4e80080f277a..c90a5352d158 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -38,6 +38,9 @@
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
+static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
+ bool vcpu_kick);
+
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
return atomic64_read(&synic->sint[sint]);
@@ -158,59 +161,24 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
return (synic->active) ? synic : NULL;
}
-static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
- u32 sint)
-{
- struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
- struct page *page;
- gpa_t gpa;
- struct hv_message *msg;
- struct hv_message_page *msg_page;
-
- gpa = synic->msg_page & PAGE_MASK;
- page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page)) {
- vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
- gpa);
- return;
- }
- msg_page = kmap_atomic(page);
-
- msg = &msg_page->sint_message[sint];
- msg->header.message_flags.msg_pending = 0;
-
- kunmap_atomic(msg_page);
- kvm_release_page_dirty(page);
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
-}
-
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_stimer *stimer;
- int gsi, idx, stimers_pending;
+ int gsi, idx;
trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
- if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
- synic_clear_sint_msg_pending(synic, sint);
-
/* Try to deliver pending Hyper-V SynIC timers messages */
- stimers_pending = 0;
for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
stimer = &hv_vcpu->stimer[idx];
- if (stimer->msg_pending &&
- (stimer->config & HV_STIMER_ENABLE) &&
- HV_STIMER_SINT(stimer->config) == sint) {
- set_bit(stimer->index,
- hv_vcpu->stimer_pending_bitmap);
- stimers_pending++;
- }
+ if (stimer->msg_pending && stimer->config.enable &&
+ !stimer->config.direct_mode &&
+ stimer->config.sintx == sint)
+ stimer_mark_pending(stimer, false);
}
- if (stimers_pending)
- kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = atomic_read(&synic->sint_to_gsi[sint]);
@@ -497,7 +465,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
ktime_now = ktime_get();
- if (stimer->config & HV_STIMER_PERIODIC) {
+ if (stimer->config.periodic) {
if (stimer->exp_time) {
if (time_now >= stimer->exp_time) {
u64 remainder;
@@ -546,13 +514,18 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
bool host)
{
+ union hv_stimer_config new_config = {.as_uint64 = config},
+ old_config = {.as_uint64 = stimer->config.as_uint64};
+
trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, config, host);
stimer_cleanup(stimer);
- if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
- config &= ~HV_STIMER_ENABLE;
- stimer->config = config;
+ if (old_config.enable &&
+ !new_config.direct_mode && new_config.sintx == 0)
+ new_config.enable = 0;
+ stimer->config.as_uint64 = new_config.as_uint64;
+
stimer_mark_pending(stimer, false);
return 0;
}
@@ -566,16 +539,16 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
stimer_cleanup(stimer);
stimer->count = count;
if (stimer->count == 0)
- stimer->config &= ~HV_STIMER_ENABLE;
- else if (stimer->config & HV_STIMER_AUTOENABLE)
- stimer->config |= HV_STIMER_ENABLE;
+ stimer->config.enable = 0;
+ else if (stimer->config.auto_enable)
+ stimer->config.enable = 1;
stimer_mark_pending(stimer, false);
return 0;
}
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
- *pconfig = stimer->config;
+ *pconfig = stimer->config.as_uint64;
return 0;
}
@@ -586,44 +559,60 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
- struct hv_message *src_msg)
+ struct hv_message *src_msg, bool no_retry)
{
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
- struct page *page;
- gpa_t gpa;
- struct hv_message *dst_msg;
+ int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
+ gfn_t msg_page_gfn;
+ struct hv_message_header hv_hdr;
int r;
- struct hv_message_page *msg_page;
if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
return -ENOENT;
- gpa = synic->msg_page & PAGE_MASK;
- page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page))
- return -EFAULT;
+ msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
- msg_page = kmap_atomic(page);
- dst_msg = &msg_page->sint_message[sint];
- if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
- src_msg->header.message_type) != HVMSG_NONE) {
- dst_msg->header.message_flags.msg_pending = 1;
- r = -EAGAIN;
- } else {
- memcpy(&dst_msg->u.payload, &src_msg->u.payload,
- src_msg->header.payload_size);
- dst_msg->header.message_type = src_msg->header.message_type;
- dst_msg->header.payload_size = src_msg->header.payload_size;
- r = synic_set_irq(synic, sint);
- if (r >= 1)
- r = 0;
- else if (r == 0)
- r = -EFAULT;
+ /*
+ * Strictly following the spec-mandated ordering would assume setting
+ * .msg_pending before checking .message_type. However, this function
+ * is only called in vcpu context so the entire update is atomic from
+ * guest POV and thus the exact order here doesn't matter.
+ */
+ r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
+ msg_off + offsetof(struct hv_message,
+ header.message_type),
+ sizeof(hv_hdr.message_type));
+ if (r < 0)
+ return r;
+
+ if (hv_hdr.message_type != HVMSG_NONE) {
+ if (no_retry)
+ return 0;
+
+ hv_hdr.message_flags.msg_pending = 1;
+ r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
+ &hv_hdr.message_flags,
+ msg_off +
+ offsetof(struct hv_message,
+ header.message_flags),
+ sizeof(hv_hdr.message_flags));
+ if (r < 0)
+ return r;
+ return -EAGAIN;
}
- kunmap_atomic(msg_page);
- kvm_release_page_dirty(page);
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
- return r;
+
+ r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
+ sizeof(src_msg->header) +
+ src_msg->header.payload_size);
+ if (r < 0)
+ return r;
+
+ r = synic_set_irq(synic, sint);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -EFAULT;
+ return 0;
}
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
@@ -633,24 +622,45 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
struct hv_timer_message_payload *payload =
(struct hv_timer_message_payload *)&msg->u.payload;
+ /*
+ * To avoid piling up periodic ticks, don't retry message
+ * delivery for them (within "lazy" lost ticks policy).
+ */
+ bool no_retry = stimer->config.periodic;
+
payload->expiration_time = stimer->exp_time;
payload->delivery_time = get_time_ref_counter(vcpu->kvm);
return synic_deliver_msg(vcpu_to_synic(vcpu),
- HV_STIMER_SINT(stimer->config), msg);
+ stimer->config.sintx, msg,
+ no_retry);
+}
+
+static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
+{
+ struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
+ struct kvm_lapic_irq irq = {
+ .delivery_mode = APIC_DM_FIXED,
+ .vector = stimer->config.apic_vector
+ };
+
+ return !kvm_apic_set_irq(vcpu, &irq, NULL);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
- int r;
+ int r, direct = stimer->config.direct_mode;
stimer->msg_pending = true;
- r = stimer_send_msg(stimer);
+ if (!direct)
+ r = stimer_send_msg(stimer);
+ else
+ r = stimer_notify_direct(stimer);
trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
- stimer->index, r);
+ stimer->index, direct, r);
if (!r) {
stimer->msg_pending = false;
- if (!(stimer->config & HV_STIMER_PERIODIC))
- stimer->config &= ~HV_STIMER_ENABLE;
+ if (!(stimer->config.periodic))
+ stimer->config.enable = 0;
}
}
@@ -664,7 +674,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
stimer = &hv_vcpu->stimer[i];
- if (stimer->config & HV_STIMER_ENABLE) {
+ if (stimer->config.enable) {
exp_time = stimer->exp_time;
if (exp_time) {
@@ -674,7 +684,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
stimer_expiration(stimer);
}
- if ((stimer->config & HV_STIMER_ENABLE) &&
+ if ((stimer->config.enable) &&
stimer->count) {
if (!stimer->msg_pending)
stimer_start(stimer);
@@ -815,9 +825,9 @@ static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
if (host)
- hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;
+ hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
- if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
+ if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {
vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
hv->hv_crash_param[0],
@@ -1758,3 +1768,124 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
return kvm_hv_eventfd_deassign(kvm, args->conn_id);
return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}
+
+int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+{
+ uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+ struct kvm_cpuid_entry2 cpuid_entries[] = {
+ { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
+ { .function = HYPERV_CPUID_INTERFACE },
+ { .function = HYPERV_CPUID_VERSION },
+ { .function = HYPERV_CPUID_FEATURES },
+ { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
+ { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
+ { .function = HYPERV_CPUID_NESTED_FEATURES },
+ };
+ int i, nent = ARRAY_SIZE(cpuid_entries);
+
+ /* Skip NESTED_FEATURES if eVMCS is not supported */
+ if (!evmcs_ver)
+ --nent;
+
+ if (cpuid->nent < nent)
+ return -E2BIG;
+
+ if (cpuid->nent > nent)
+ cpuid->nent = nent;
+
+ for (i = 0; i < nent; i++) {
+ struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
+ u32 signature[3];
+
+ switch (ent->function) {
+ case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
+ memcpy(signature, "Linux KVM Hv", 12);
+
+ ent->eax = HYPERV_CPUID_NESTED_FEATURES;
+ ent->ebx = signature[0];
+ ent->ecx = signature[1];
+ ent->edx = signature[2];
+ break;
+
+ case HYPERV_CPUID_INTERFACE:
+ memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
+ ent->eax = signature[0];
+ break;
+
+ case HYPERV_CPUID_VERSION:
+ /*
+ * We implement some Hyper-V 2016 functions so let's use
+ * this version.
+ */
+ ent->eax = 0x00003839;
+ ent->ebx = 0x000A0000;
+ break;
+
+ case HYPERV_CPUID_FEATURES:
+ ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+ ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+ ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
+ ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+ ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
+ ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
+ ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
+ ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
+ ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
+ ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
+
+ ent->ebx |= HV_X64_POST_MESSAGES;
+ ent->ebx |= HV_X64_SIGNAL_EVENTS;
+
+ ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
+ ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+
+ break;
+
+ case HYPERV_CPUID_ENLIGHTMENT_INFO:
+ ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
+ ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+ ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+ ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+ ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+
+ /*
+ * Default number of spinlock retry attempts, matches
+ * HyperV 2016.
+ */
+ ent->ebx = 0x00000FFF;
+
+ break;
+
+ case HYPERV_CPUID_IMPLEMENT_LIMITS:
+ /* Maximum number of virtual processors */
+ ent->eax = KVM_MAX_VCPUS;
+ /*
+ * Maximum number of logical processors, matches
+ * HyperV 2016.
+ */
+ ent->ebx = 64;
+
+ break;
+
+ case HYPERV_CPUID_NESTED_FEATURES:
+ ent->eax = evmcs_ver;
+
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (copy_to_user(entries, cpuid_entries,
+ nent * sizeof(struct kvm_cpuid_entry2)))
+ return -EFAULT;
+
+ return 0;
+}
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 0e66c12ed2c3..fd7cf13a2144 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -24,6 +24,8 @@
#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__
+#include <linux/kvm_host.h>
+
static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
{
return &vcpu->arch.hyperv;
@@ -95,5 +97,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
+int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries);
#endif
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 9619dcc2b325..f8f56a93358b 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -2,6 +2,8 @@
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H
+#include <linux/kvm_host.h>
+
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c4533d05c214..9f089e2e09d0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -251,10 +251,9 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
if (enabled != apic->sw_enabled) {
apic->sw_enabled = enabled;
- if (enabled) {
+ if (enabled)
static_key_slow_dec_deferred(&apic_sw_disabled);
- recalculate_apic_map(apic->vcpu->kvm);
- } else
+ else
static_key_slow_inc(&apic_sw_disabled.key);
}
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7c03c0f35444..ce770b446238 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -264,6 +264,35 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
+
+static inline bool kvm_available_flush_tlb_with_range(void)
+{
+ return kvm_x86_ops->tlb_remote_flush_with_range;
+}
+
+static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
+{
+ int ret = -ENOTSUPP;
+
+ if (range && kvm_x86_ops->tlb_remote_flush_with_range)
+ ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+
+ if (ret)
+ kvm_flush_remote_tlbs(kvm);
+}
+
+static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
+ u64 start_gfn, u64 pages)
+{
+ struct kvm_tlb_range range;
+
+ range.start_gfn = start_gfn;
+ range.pages = pages;
+
+ kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
BUG_ON((mmio_mask & mmio_value) != mmio_value);
@@ -1456,8 +1485,12 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
- if (__drop_large_spte(vcpu->kvm, sptep))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ if (__drop_large_spte(vcpu->kvm, sptep)) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+ KVM_PAGES_PER_HPAGE(sp->role.level));
+ }
}
/*
@@ -1743,10 +1776,12 @@ restart:
}
}
- if (need_flush)
- kvm_flush_remote_tlbs(kvm);
+ if (need_flush && kvm_available_flush_tlb_with_range()) {
+ kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
+ return 0;
+ }
- return 0;
+ return need_flush;
}
struct slot_rmap_walk_iterator {
@@ -1880,9 +1915,9 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
- kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+ return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1925,7 +1960,8 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+ KVM_PAGES_PER_HPAGE(sp->role.level));
}
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
@@ -2441,7 +2477,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
account_shadowed(vcpu->kvm, sp);
if (level == PT_PAGE_TABLE_LEVEL &&
rmap_write_protect(vcpu, gfn))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
@@ -2561,7 +2597,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
return;
drop_parent_pte(child, sptep);
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
}
}
@@ -2985,8 +3021,10 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
ret = RET_PF_EMULATE;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
+
if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
+ KVM_PAGES_PER_HPAGE(level));
if (unlikely(is_mmio_spte(*sptep)))
ret = RET_PF_EMULATE;
@@ -5586,8 +5624,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ bool flush_tlb = true;
+ bool flush = false;
int i;
+ if (kvm_available_flush_tlb_with_range())
+ flush_tlb = false;
+
spin_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
@@ -5599,12 +5642,17 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
if (start >= end)
continue;
- slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
- PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true);
+ flush |= slot_handle_level_range(kvm, memslot,
+ kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+ PT_MAX_HUGEPAGE_LEVEL, start,
+ end - 1, flush_tlb);
}
}
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+ gfn_end - gfn_start + 1);
+
spin_unlock(&kvm->mmu_lock);
}
@@ -5638,12 +5686,13 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
* spte from present to present (changing the spte from present
* to nonpresent will flush all the TLBs immediately), in other
* words, the only case we care is mmu_spte_update() where we
- * haved checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
+ * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
* instead of PT_WRITABLE_MASK, that means it does not depend
* on PT_WRITABLE_MASK anymore.
*/
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5671,7 +5720,13 @@ restart:
!kvm_is_reserved_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
pte_list_remove(rmap_head, sptep);
- need_tlb_flush = 1;
+
+ if (kvm_available_flush_tlb_with_range())
+ kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
+ KVM_PAGES_PER_HPAGE(sp->role.level));
+ else
+ need_tlb_flush = 1;
+
goto restart;
}
}
@@ -5707,7 +5762,8 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
* dirty_bitmap.
*/
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
@@ -5725,7 +5781,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
lockdep_assert_held(&kvm->slots_lock);
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
@@ -5742,7 +5799,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
/* see kvm_mmu_slot_leaf_clear_dirty */
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7cf2185b7eb5..6bdca39829bc 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -894,7 +894,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm,
+ sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
if (!rmap_can_add(vcpu))
break;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 101f53ccf571..307e5bddb6d9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -675,11 +675,6 @@ struct svm_cpu_data {
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
-struct svm_init_data {
- int cpu;
- int r;
-};