author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-12 11:34:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-12 11:34:04 -0700
commit     b357bf6023a948cf6a9472f07a1b0caac0e4f8e8 (patch)
tree       1471a2691cd56e8640cf6ad51e255b54903a164b /arch/arm64
parent     0725d4e1b8b08a60838db3a6e65c23ea8824a048 (diff)
parent     766d3571d8e50d3a73b77043dc632226f9e6b389 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "Small update for KVM:

  ARM:
   - lazy context-switching of FPSIMD registers on arm64
   - "split" regions for vGIC redistributor

  s390:
   - cleanups for nested
   - clock handling
   - crypto
   - storage keys
   - control register bits

  x86:
   - many bugfixes
   - implement more Hyper-V super powers
   - implement lapic_timer_advance_ns even when the LAPIC timer is
     emulated using the processor's VMX preemption timer.
   - two security-related bugfixes at the top of the branch"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (79 commits)
  kvm: fix typo in flag name
  kvm: x86: use correct privilege level for sgdt/sidt/fxsave/fxrstor access
  KVM: x86: pass kvm_vcpu to kvm_read_guest_virt and kvm_write_guest_virt_system
  KVM: x86: introduce linear_{read,write}_system
  kvm: nVMX: Enforce cpl=0 for VMX instructions
  kvm: nVMX: Add support for "VMWRITE to any supported field"
  kvm: nVMX: Restrict VMX capability MSR changes
  KVM: VMX: Optimize tscdeadline timer latency
  KVM: docs: nVMX: Remove known limitations as they do not exist now
  KVM: docs: mmu: KVM support exposing SLAT to guests
  kvm: no need to check return value of debugfs_create functions
  kvm: Make VM ioctl do valloc for some archs
  kvm: Change return type to vm_fault_t
  KVM: docs: mmu: Fix link to NPT presentation from KVM Forum 2008
  kvm: x86: Amend the KVM_GET_SUPPORTED_CPUID API documentation
  KVM: x86: hyperv: declare KVM_CAP_HYPERV_TLBFLUSH capability
  KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}_EX implementation
  KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE} implementation
  KVM: introduce kvm_make_vcpus_request_mask() API
  KVM: x86: hyperv: do rep check for each hypercall separately
  ...
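The arm64 half of this pull is built around four new per-vcpu FPSIMD hooks declared in <asm/kvm_host.h> and implemented in the new arch/arm64/kvm/fpsimd.c below. The common KVM code that actually calls them lives outside arch/arm64 and so is not part of this diffstat; the following is only a sketch of the intended call ordering, inferred from the hooks' own comments, and the caller and helper names (run_vcpu, enter_guest, vcpu_wants_exit) are illustrative rather than taken from the patch:

	/* Sketch only: illustrates when the new arm64 FP hooks are expected to run. */
	static int run_vcpu(struct kvm_vcpu *vcpu)
	{
		int ret;

		/* On entry to KVM_RUN from a new task: map current's thread_info
		 * and FPSIMD state into hyp so the lazy save can reach them. */
		ret = kvm_arch_vcpu_run_map_fp(vcpu);
		if (ret)
			return ret;

		/* vcpu_load time: record that the host's FP regs are the live ones. */
		kvm_arch_vcpu_load_fp(vcpu);

		do {
			enter_guest(vcpu);	/* guest traps on its first FP/SIMD access */

			/* After each exit, with interrupts still disabled: if the
			 * guest's FP regs are now live, bind them to this cpu so
			 * kernel-mode NEON and context switch write them back. */
			kvm_arch_vcpu_ctxsync_fp(vcpu);
		} while (!vcpu_wants_exit(vcpu));

		/* vcpu_put time: flush dirty guest FP state, restore host TIF_SVE. */
		kvm_arch_vcpu_put_fp(vcpu);
		return 0;
	}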
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                    |   7
-rw-r--r--  arch/arm64/include/asm/cpufeature.h   |  29
-rw-r--r--  arch/arm64/include/asm/fpsimd.h       |  21
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h      |   8
-rw-r--r--  arch/arm64/include/asm/kvm_host.h     |  49
-rw-r--r--  arch/arm64/include/asm/processor.h    |  15
-rw-r--r--  arch/arm64/include/asm/thread_info.h  |  13
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h     |   1
-rw-r--r--  arch/arm64/kernel/fpsimd.c            | 177
-rw-r--r--  arch/arm64/kernel/ptrace.c            |   1
-rw-r--r--  arch/arm64/kvm/Kconfig                |   1
-rw-r--r--  arch/arm64/kvm/Makefile               |   2
-rw-r--r--  arch/arm64/kvm/debug.c                |   8
-rw-r--r--  arch/arm64/kvm/fpsimd.c               | 110
-rw-r--r--  arch/arm64/kvm/hyp/debug-sr.c         |   6
-rw-r--r--  arch/arm64/kvm/hyp/entry.S            |  43
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S        |  19
-rw-r--r--  arch/arm64/kvm/hyp/switch.c           | 124
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c        |   4
-rw-r--r--  arch/arm64/kvm/sys_regs.c             |   9
20 files changed, 392 insertions, 255 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9795b59aa28a..9fd4a8ccce07 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1128,6 +1128,7 @@ endmenu
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
+ depends on !KVM || ARM64_VHE
help
The Scalable Vector Extension (SVE) is an extension to the AArch64
execution state which complements and extends the SIMD functionality
@@ -1153,6 +1154,12 @@ config ARM64_SVE
booting the kernel. If unsure and you are not observing these
symptoms, you should assume that it is safe to say Y.
+ CPUs that support SVE are architecturally required to support the
+ Virtualization Host Extensions (VHE), so the kernel makes no
+ provision for supporting SVE alongside KVM without VHE enabled.
+ Thus, you will need to enable CONFIG_ARM64_VHE if you want to support
+ KVM in the same kernel image.
+
config ARM64_MODULE_PLTS
bool
select HAVE_MOD_ARCH_SPECIFIC
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 55bc1f073bfb..1717ba1db35d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,9 +11,7 @@
#include <asm/cpucaps.h>
#include <asm/cputype.h>
-#include <asm/fpsimd.h>
#include <asm/hwcap.h>
-#include <asm/sigcontext.h>
#include <asm/sysreg.h>
/*
@@ -510,33 +508,6 @@ static inline bool system_supports_sve(void)
cpus_have_const_cap(ARM64_SVE);
}
-/*
- * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
- * vector length.
- *
- * Use only if SVE is present.
- * This function clobbers the SVE vector length.
- */
-static inline u64 read_zcr_features(void)
-{
- u64 zcr;
- unsigned int vq_max;
-
- /*
- * Set the maximum possible VL, and write zeroes to all other
- * bits to see if they stick.
- */
- sve_kernel_enable(NULL);
- write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
-
- zcr = read_sysreg_s(SYS_ZCR_EL1);
- zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
- vq_max = sve_vq_from_vl(sve_get_vl());
- zcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
- return zcr;
-}
-
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index aa7162ae93e3..fa92747a49c8 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -18,6 +18,8 @@
#include <asm/ptrace.h>
#include <asm/errno.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
#ifndef __ASSEMBLY__
@@ -41,6 +43,8 @@ struct task_struct;
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);
+extern void fpsimd_save(void);
+
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);
@@ -49,12 +53,27 @@ extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
+extern void fpsimd_bind_task_to_cpu(void);
+extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state);
+
extern void fpsimd_flush_task_state(struct task_struct *target);
+extern void fpsimd_flush_cpu_state(void);
extern void sve_flush_cpu_state(void);
/* Maximum VL that SVE VL-agnostic software can transparently support */
#define SVE_VL_ARCH_MAX 0x100
+/* Offset of FFR in the SVE register dump */
+static inline size_t sve_ffr_offset(int vl)
+{
+ return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
+}
+
+static inline void *sve_pffr(struct thread_struct *thread)
+{
+ return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
+}
+
extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1);
@@ -63,6 +82,8 @@ extern unsigned int sve_get_vl(void);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern u64 read_zcr_features(void);
+
extern int __ro_after_init sve_max_vl;
#ifdef CONFIG_ARM64_SVE
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 951b2076a5e2..102b5a5c47b6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -33,19 +33,19 @@
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
-#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
-#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+#ifndef __ASSEMBLY__
+
+#include <linux/mm.h>
/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym) \
({ \
void *val = &sym; \
if (!is_kernel_in_hyp_mode()) \
- val = phys_to_virt((u64)&sym - kimage_voffset); \
+ val = lm_alias(&sym); \
val; \
})
-#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 95d8a0e15b5f..fda9a8ca48be 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,6 +30,7 @@
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -219,8 +220,8 @@ struct kvm_vcpu_arch {
/* State of various workarounds, see kvm_asm.h for bit assignment */
u64 workaround_flags;
- /* Guest debug state */
- u64 debug_flags;
+ /* Miscellaneous vcpu state flags */
+ u64 flags;
/*
* We maintain more than a single set of debug registers to support
@@ -241,6 +242,10 @@ struct kvm_vcpu_arch {
/* Pointer to host CPU context */
kvm_cpu_context_t *host_cpu_context;
+
+ struct thread_info *host_thread_info; /* hyp VA */
+ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
+
struct {
/* {Break,watch}point registers */
struct kvm_guest_debug_arch regs;
@@ -296,6 +301,12 @@ struct kvm_vcpu_arch {
bool sysregs_loaded_on_cpu;
};
+/* vcpu_arch flags field values: */
+#define KVM_ARM64_DEBUG_DIRTY (1 << 0)
+#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
+#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
+#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
+
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
/*
@@ -397,6 +408,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
}
+static inline bool kvm_arch_check_sve_has_vhe(void)
+{
+ /*
+ * The Arm architecture specifies that implementation of SVE
+ * requires VHE also to be implemented. The KVM code for arm64
+ * relies on this when SVE is present:
+ */
+ if (system_supports_sve())
+ return has_vhe();
+ else
+ return true;
+}
+
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
@@ -423,15 +447,18 @@ static inline void __cpu_init_stage2(void)
"PARange is %d bits, unsupported configuration!", parange);
}
-/*
- * All host FP/SIMD state is restored on guest exit, so nothing needs
- * doing here except in the SVE case:
-*/
-static inline void kvm_fpsimd_flush_cpu_state(void)
+/* Guest/host FPSIMD coordination helpers */
+int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
+static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
- if (system_supports_sve())
- sve_flush_cpu_state();
+ return kvm_arch_vcpu_run_map_fp(vcpu);
}
+#endif
static inline void kvm_arm_vhe_guest_enter(void)
{
@@ -481,4 +508,8 @@ static inline int kvm_arm_have_ssbd(void)
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+#define __KVM_HAVE_ARCH_VM_ALLOC
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 65ab83e8926e..a73ae1e49200 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -158,7 +158,9 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);
-#define INIT_THREAD { }
+#define INIT_THREAD { \
+ .fpsimd_cpu = NR_CPUS, \
+}
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
@@ -249,6 +251,17 @@ void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);
+/*
+ * Not at the top of the file due to a direct #include cycle between
+ * <asm/fpsimd.h> and <asm/processor.h>. Deferring this #include
+ * ensures that contents of processor.h are visible to fpsimd.h even if
+ * processor.h is included first.
+ *
+ * These prctl helpers are the only things in this file that require
+ * fpsimd.h. The core code expects them to be in this header.
+ */
+#include <asm/fpsimd.h>
+
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index cbcf11b5e637..cb2c10a8f0a8 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -45,12 +45,6 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => bug */
};
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
-}
-
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
@@ -118,5 +112,12 @@ void arch_release_task_struct(struct task_struct *tsk);
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
_TIF_NOHZ)
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = _TIF_FOREIGN_FPSTATE, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 04b3256f8e6d..4e76630dd655 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -91,6 +91,7 @@ struct kvm_regs {
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
#define KVM_VGIC_ITS_ADDR_TYPE 4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3b527ae46e49..84c68b14f1b2 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -36,12 +36,14 @@
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
+#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
+#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
@@ -117,7 +119,6 @@
*/
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
- bool sve_in_use;
};
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -158,19 +159,6 @@ static void sve_free(struct task_struct *task)
__sve_free(task);
}
-
-/* Offset of FFR in the SVE register dump */
-static size_t sve_ffr_offset(int vl)
-{
- return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
-}
-
-static void *sve_pffr(struct task_struct *task)
-{
- return (char *)task->thread.sve_state +
- sve_ffr_offset(task->thread.sve_vl);
-}
-
static void change_cpacr(u64 val, u64 mask)
{
u64 cpacr = read_sysreg(CPACR_EL1);
@@ -251,31 +239,24 @@ static void task_fpsimd_load(void)
WARN_ON(!in_softirq() && !irqs_disabled());
if (system_supports_sve() && test_thread_flag(TIF_SVE))
- sve_load_state(sve_pffr(current),
+ sve_load_state(sve_pffr(&current->thread),
&current->thread.uw.fpsimd_state.fpsr,
sve_vq_from_vl(current->thread.sve_vl) - 1);
else
fpsimd_load_state(&current->thread.uw.fpsimd_state);
-
- if (system_supports_sve()) {
- /* Toggle SVE trapping for userspace if needed */
- if (test_thread_flag(TIF_SVE))
- sve_user_enable();
- else
- sve_user_disable();
-
- /* Serialised by exception return to user */
- }
}
/*
- * Ensure current's FPSIMD/SVE storage in thread_struct is up to date
- * with respect to the CPU registers.
+ * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
+ * date with respect to the CPU registers.
*
* Softirqs (and preemption) must be disabled.
*/
-static void task_fpsimd_save(void)
+void fpsimd_save(void)
{
+ struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+ /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+
WARN_ON(!in_softirq() && !irqs_disabled());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -290,10 +271,9 @@ static void task_fpsimd_save(void)
return;
}
- sve_save_state(sve_pffr(current),
- &current->thread.uw.fpsimd_state.fpsr);
+ sve_save_state(sve_pffr(&current->thread), &st->fpsr);
} else
- fpsimd_save_state(&current->thread.uw.fpsimd_state);
+ fpsimd_save_state(st);
}
}
@@ -588,7 +568,7 @@ int sve_set_vector_length(struct task_struct *task,
if (task == current) {
local_bh_disable();
- task_fpsimd_save();
+ fpsimd_save();
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
@@ -608,10 +588,8 @@ int sve_set_vector_length(struct task_struct *task,
task->thread.sve_vl = vl;
out:
- if (flags & PR_SVE_VL_INHERIT)
- set_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
- else
- clear_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
+ update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
+ flags & PR_SVE_VL_INHERIT);
return 0;
}
@@ -755,6 +733,33 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
isb();
}
+/*
+ * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
+ * vector length.
+ *
+ * Use only if SVE is present.
+ * This function clobbers the SVE vector length.
+ */
+u64 read_zcr_features(void)
+{
+ u64 zcr;
+ unsigned int vq_max;
+
+ /*
+ * Set the maximum possible VL, and write zeroes to all other
+ * bits to see if they stick.
+ */
+ sve_kernel_enable(NULL);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
+
+ zcr = read_sysreg_s(SYS_ZCR_EL1);
+ zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
+ vq_max = sve_vq_from_vl(sve_get_vl());
+ zcr |= vq_max - 1; /* set LEN field to maximum effective value */
+
+ return zcr;
+}
+
void __init sve_setup(void)
{
u64 zcr;
@@ -829,7 +834,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
local_bh_disable();
- task_fpsimd_save();
+ fpsimd_save();
fpsimd_to_sve(current);
/* Force ret_to_user to reload the registers: */
@@ -882,31 +887,25 @@ asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
void fpsimd_thread_switch(struct task_struct *next)
{
+ bool wrong_task, wrong_cpu;
+
if (!system_supports_fpsimd())
return;
+
+ /* Save unsaved fpsimd state, if any: */
+ fpsimd_save();
+
/*
- * Save the current FPSIMD state to memory, but only if whatever is in
- * the registers is in fact the most recent userland FPSIMD state of
- * 'current'.
+ * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
+ * state. For kernel threads, FPSIMD registers are never loaded
+ * and wrong_task and wrong_cpu will always be true.
*/
- if (current->mm)
- task_fpsimd_save();
+ wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
+ &next->thread.uw.fpsimd_state;
+ wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
- if (next->mm) {
- /*
- * If we are switching to a task whose most recent userland
- * FPSIMD state is already in the registers of *this* cpu,
- * we can skip loading the state from memory. Otherwise, set
- * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
- * upon the next return to userland.
- */
- if (__this_cpu_read(fpsimd_last_state.st) ==
- &next->thread.uw.fpsimd_state
- && next->thread.fpsimd_cpu == smp_processor_id())
- clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
- else
- set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
- }
+ update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
+ wrong_task || wrong_cpu);
}
void fpsimd_flush_thread(void)
@@ -972,7 +971,7 @@ void fpsimd_preserve_current_state(void)
return;
local_bh_disable();
- task_fpsimd_save();
+ fpsimd_save();
local_bh_enable();
}
@@ -992,14 +991,33 @@ void fpsimd_signal_preserve_current_state(void)
* Associate current's FPSIMD context with this cpu
* Preemption must be disabled when calling this function.
*/
-static void fpsimd_bind_to_cpu(void)
+void fpsimd_bind_task_to_cpu(void)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
last->st = &current->thread.uw.fpsimd_state;
- last->sve_in_use = test_thread_flag(TIF_SVE);
current->thread.fpsimd_cpu = smp_processor_id();
+
+ if (system_supports_sve()) {
+ /* Toggle SVE trapping for userspace if needed */
+ if (test_thread_flag(TIF_SVE))
+ sve_user_enable();
+ else
+ sve_user_disable();
+
+ /* Serialised by exception return to user */
+ }
+}
+
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+{
+ struct fpsimd_last_state_struct *last =
+ this_cpu_ptr(&fpsimd_last_state);
+
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ last->st = st;
}
/*
@@ -1016,7 +1034,7 @@ void fpsimd_restore_current_state(void)
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
task_fpsimd_load();
- fpsimd_bind_to_cpu();
+ fpsimd_bind_task_to_cpu();
}
local_bh_enable();
@@ -1039,9 +1057,9 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
fpsimd_to_sve(current);
task_fpsimd_load();
+ fpsimd_bind_task_to_cpu();
- if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_bind_to_cpu();
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
local_bh_enable();
}
@@ -1054,29 +1072,12 @@ void fpsimd_flush_task_state(struct task_struct *t)
t->thread.fpsimd_cpu = NR_CPUS;
}
-static inline void fpsimd_flush_cpu_state(void)
+void fpsimd_flush_cpu_state(void)
{
__this_cpu_write(fpsimd_last_state.st, NULL);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
}
-/*
- * Invalidate any task SVE state currently held in this CPU's regs.
- *
- * This is used to prevent the kernel from trying to reuse SVE register data
- * that is detroyed by KVM guest enter/exit. This function should go away when
- * KVM SVE support is implemented. Don't use it for anything else.
- */
-#ifdef CONFIG_ARM64_SVE
-void sve_flush_cpu_state(void)
-{
- struct fpsimd_last_state_struct const *last =
- this_cpu_ptr(&fpsimd_last_state);
-
- if (last->st && last->sve_in_use)
- fpsimd_flush_cpu_state();
-}
-#endif /* CONFIG_ARM64_SVE */
-
#ifdef CONFIG_KERNEL_MODE_NEON
DEFINE_PER_CPU(bool, kernel_neon_busy);
@@ -1110,11 +1111,8 @@ void kernel_neon_begin(void)
__this_cpu_write(kernel_neon_busy, true);
- /* Save unsaved task fpsimd state, if any: */
- if (current->mm) {
- task_fpsimd_save();
- set_thread_flag(TIF_FOREIGN_FPSTATE);
- }
+ /* Save unsaved fpsimd state, if any: */
+ fpsimd_save();
/* Invalidate any task state remaining in the fpsimd regs: */
fpsimd_flush_cpu_state();
@@ -1236,13 +1234,10 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
{
switch (cmd) {
case CPU_PM_ENTER:
- if (current->mm)
- task_fpsimd_save();
+ fpsimd_save();
fpsimd_flush_cpu_state();
break;
case CPU_PM_EXIT:
- if (current->mm)
- set_thread_flag(TIF_FOREIGN_FPSTATE);
break;
case CPU_PM_ENTER_FAILED:
default:
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index bd732644c2f6..5c338ce5a7fa 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -44,6 +44,7 @@
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
+#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a2e3a5af1113..47b23bf617c7 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -39,6 +39,7 @@ config KVM
select HAVE_KVM_IRQ_ROUTING
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
+ select HAVE_KVM_VCPU_RUN_PID_CHANGE
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 93afff91cb7c..0f2a135ba15b 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -19,7 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index a1f4ebdfe6d3..00d422336a45 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -103,7 +103,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
- * flag on vcpu->arch.debug_flags). Since the guest must not interfere
+ * flag on vcpu->arch.flags). Since the guest must not interfere
* with the hardware state when debugging the guest, we must ensure that
* trapping is enabled whenever we are debugging the guest using the
* debug registers.
@@ -111,7 +111,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
- bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+ bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
unsigned long mdscr;
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
@@ -184,7 +184,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
trap_debug = true;
trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
@@ -206,7 +206,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
/* If KDE or MDE are set, perform a full save/restore cycle. */
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
new file mode 100644
index 000000000000..dc6ecfa5a2d2
--- /dev/null
+++ b/arch/arm64/kvm/fpsimd.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
+ *
+ * Copyright 2018 Arm Limited
+ * Author: Dave Martin <Dave.Martin@arm.com>
+ */
+#include <linux/bottom_half.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_mmu.h>
+
+/*
+ * Called on entry to KVM_RUN unless this vcpu previously ran at least
+ * once and the most recent prior KVM_RUN for this vcpu was called from
+ * the same task as current (highly likely).
+ *
+ * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
+ * such that on entering hyp the relevant parts of current are already
+ * mapped.
+ */
+int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ struct thread_info *ti = &current->thread_info;
+ struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
+
+ /*
+ * Make sure the host task thread flags and fpsimd state are
+ * visible to hyp:
+ */
+ ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
+ if (ret)
+ goto error;
+
+ ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
+ if (ret)
+ goto error;
+
+ vcpu->arch.host_thread_info = kern_hyp_va(ti);
+ vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+error:
+ return ret;
+}
+
+/*
+ * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
+ * The actual loading is done by the FPSIMD access trap taken to hyp.
+ *
+ * Here, we just set the correct metadata to indicate that the FPSIMD
+ * state in the cpu regs (if any) belongs to current on the host.
+ *
+ * TIF_SVE is backed up here, since it may get clobbered with guest state.
+ * This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
+ */
+void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
+{
+ BUG_ON(!current->mm);
+
+ vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+ vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+ if (test_thread_flag(TIF_SVE))
+ vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+}
+
+/*
+ * If the guest FPSIMD state was loaded, update the host's context
+ * tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu
+ * so that they will be written back if the kernel clobbers them due to
+ * kernel-mode NEON before re-entry into the guest.
+ */
+void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
+{
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+ clear_thread_flag(TIF_SVE);
+ }
+}
+
+/*
+ * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
+ * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
+ * disappears and another task or vcpu appears that recycles the same
+ * struct fpsimd_state.
+ */
+void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
+{
+ local_bh_disable();
+
+ update_thread_flag(TIF_SVE,
+ vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+ if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ /* Clean guest FP state to memory and invalidate cpu view */
+ fpsimd_save();
+ fpsimd_flush_cpu_state();
+ } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ /* Ensure user trap controls are correctly restored */
+ fpsimd_bind_task_to_cpu();
+ }
+
+ local_bh_enable();
+}
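The hooks above only set up metadata; the guest's registers are actually loaded by the FPSIMD trap handler added to arch/arm64/kvm/hyp/switch.c (124 lines changed per the diffstat; that hunk appears further down). A rough sketch of the handler's shape, using the vcpu->arch.flags bits defined in <asm/kvm_host.h> above — the function name and exact system-register handling here are approximations, not copied from the patch:

	/* Approximate shape of the hyp-side lazy switch on an FPSIMD trap. */
	static void __hyp_text fpsimd_trap_sketch(struct kvm_vcpu *vcpu)
	{
		/* Stop trapping FP/SIMD before touching the registers. */
		if (has_vhe())
			write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN, cpacr_el1);
		else
			write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, cptr_el2);
		isb();

		if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
			/* First guest FP access since vcpu_load: lazily save the
			 * host's registers via the hyp-mapped pointer. */
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
			vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
		}

		/* Load the guest's FPSIMD regs and record that they are live, so
		 * kvm_arch_vcpu_ctxsync_fp()/put_fp() know to write them back. */
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
		vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
	}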
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 3e717f66f011..50009766e5e5 100644
--- a/