author     Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 10:05:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 10:05:01 -0700
commit     3cd86a58f7734bf9cef38f6f899608ebcaa3da13 (patch)
tree       6ae5b8109011ee40deef645a9701e2d8dc4e4fce /arch/arm64/kvm/hyp/switch.c
parent     a8222fd5b80c7ec83f257060670becbeea9b50b9 (diff)
parent     b2a84de2a2deb76a6a51609845341f508c518c03 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "The bulk is in-kernel pointer authentication, activity monitors and
  lots of asm symbol annotations. I also queued the sys_mremap() patch
  commenting the asymmetry in the address untagging.

  Summary:

   - In-kernel Pointer Authentication support (previously only offered to user space).
   - ARM Activity Monitors (AMU) extension support allowing better CPU utilisation numbers for the scheduler (frequency invariance).
   - Memory hot-remove support for arm64.
   - Lots of asm annotations (SYM_*) in preparation for the in-kernel Branch Target Identification (BTI) support.
   - arm64 perf updates: ARMv8.5-PMU 64-bit counters, refactoring the PMU init callbacks, support for new DT compatibles.
   - IPv6 header checksum optimisation.
   - Fixes: SDEI (software delegated exception interface) double-lock on hibernate with shared events.
   - Minor clean-ups and refactoring: cpu_ops accessor, cpu_do_switch_mm() converted to C, cpufeature finalisation helper.
   - sys_mremap() comment explaining the asymmetric address untagging behaviour"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (81 commits)
  mm/mremap: Add comment explaining the untagging behaviour of mremap()
  arm64: head: Convert install_el2_stub to SYM_INNER_LABEL
  arm64: Introduce get_cpu_ops() helper function
  arm64: Rename cpu_read_ops() to init_cpu_ops()
  arm64: Declare ACPI parking protocol CPU operation if needed
  arm64: move kimage_vaddr to .rodata
  arm64: use mov_q instead of literal ldr
  arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH
  lkdtm: arm64: test kernel pointer authentication
  arm64: compile the kernel with ptrauth return address signing
  kconfig: Add support for 'as-option'
  arm64: suspend: restore the kernel ptrauth keys
  arm64: __show_regs: strip PAC from lr in printk
  arm64: unwind: strip PAC from kernel addresses
  arm64: mask PAC bits of __builtin_return_address
  arm64: initialize ptrauth keys for kernel booting task
  arm64: initialize and switch ptrauth kernel keys
  arm64: enable ptrauth earlier
  arm64: cpufeature: handle conflicts based on capability
  arm64: cpufeature: Move cpu capability helpers inside C file
  ...
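The "cpufeature finalisation helper" mentioned in the summary is what the diff below adopts: every cpus_have_const_cap() check in the hyp switch code becomes cpus_have_final_cap(). The following is a simplified sketch of the distinction, not the real arch/arm64/include/asm/cpufeature.h implementation (which also range-checks the capability number and BUG()s on early misuse); the names system_capabilities_finalized(), cpus_have_cap() and cpu_hwcap_keys are borrowed from that header purely for illustration.

static __always_inline bool sketch_cpus_have_const_cap(int num)
{
	/*
	 * Usable before CPU capabilities are finalised: fall back to a
	 * bitmap test until the static branches have been patched in.
	 */
	if (system_capabilities_finalized())
		return static_branch_unlikely(&cpu_hwcap_keys[num]);
	return cpus_have_cap(num);
}

static __always_inline bool sketch_cpus_have_final_cap(int num)
{
	/*
	 * The caller promises it only runs after finalisation -- true for
	 * KVM's world-switch code -- so the fallback path and the extra
	 * runtime check it implies are dropped.
	 */
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

Because the hyp code only ever runs after the boot-time capability pass, the final variant is both safe and slightly cheaper there.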
Diffstat (limited to 'arch/arm64/kvm/hyp/switch.c')
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 925086b46136..eaa05c3c7235 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -98,6 +98,18 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
+
+ /*
+ * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
+ * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
+ * except for some missing controls, such as TAM.
+ * In this case, CPTR_EL2.TAM has the same position with or without
+ * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
+ * shift value for trapping the AMU accesses.
+ */
+
+ val |= CPTR_EL2_TAM;
+
if (update_fp_enabled(vcpu)) {
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
@@ -119,7 +131,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
__activate_traps_common(vcpu);
val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP;
__activate_traps_fpsimd32(vcpu);
@@ -127,7 +139,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
write_sysreg(val, cptr_el2);
- if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
isb();
@@ -146,12 +158,12 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
u64 hcr = vcpu->arch.hcr_el2;
- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+ if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
write_sysreg(hcr, hcr_el2);
- if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+ if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
if (has_vhe())
@@ -181,7 +193,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
{
u64 mdcr_el2 = read_sysreg(mdcr_el2);
- if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
u64 val;
/*
@@ -328,7 +340,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
* resolve the IPA using the AT instruction.
*/
if (!(esr & ESR_ELx_S1PTW) &&
- (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+ (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
(esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
if (!__translate_far_to_hpfar(far, &hpfar))
return false;
@@ -498,7 +510,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
handle_tx2_tvm(vcpu))
return true;
@@ -555,7 +567,7 @@ exit:
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
- if (!cpus_have_const_cap(ARM64_SSBD))
+ if (!cpus_have_final_cap(ARM64_SSBD))
return false;
return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
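A note on the first hunk above: the new comment in activate_traps_vhe() hinges on the fact that, with HCR_EL2.E2H == 1, writes to CPACR_EL1 are re-routed to CPTR_EL2, and the TAM control occupies the same bit position in both register layouts, so the VHE path can reuse the nVHE CPTR_EL2_TAM constant. Below is a minimal standalone sketch of that reasoning; the bit-30 position for TAM is an assumption taken from the architecture's AMU definition, not from this diff.

#include <stdint.h>

/* Assumption: TAM is bit 30 of CPTR_EL2 in both the E2H == 0 and E2H == 1 layouts. */
#define SKETCH_CPTR_EL2_TAM	(UINT64_C(1) << 30)

static uint64_t vhe_cpacr_with_amu_trap(uint64_t cpacr)
{
	/*
	 * Under VHE the write issued through the CPACR_EL1 encoding lands
	 * in CPTR_EL2, so setting bit 30 traps guest accesses to the AMU
	 * registers to EL2 even though CPACR_EL1 itself defines no TAM field.
	 */
	return cpacr | SKETCH_CPTR_EL2_TAM;
}

This mirrors the "val |= CPTR_EL2_TAM;" line added on both the VHE and nVHE activation paths in the diff.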