author    Marc Zyngier <maz@kernel.org>  2020-09-18 12:25:40 +0100
committer Will Deacon <will@kernel.org>  2020-09-29 16:08:16 +0100
commit    29e8910a566aad3ee72f729e45842858d51ced8d (patch)
tree      24f724d8fc710a804931c0adcbe1418794491e56 /arch/arm64/include/asm
parent    c28762070ca651fe7a981b8f31d972c9b7d2c386 (diff)
KVM: arm64: Simplify handling of ARCH_WORKAROUND_2
Owing to the fact that the host kernel is always mitigated, we can
drastically simplify the WA2 handling by keeping the mitigation state
ON when entering the guest. This means the guest is either unaffected
or not mitigated.

This results in a nice simplification of the mitigation space, and the
removal of a lot of code that was never really used anyway.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
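For illustration, the post-change model can be sketched as follows. This is
a minimal, hypothetical C sketch, not the kernel's actual code: the
wa2_state enum, wa2_get_state() helper and handle_wa2_query() function are
invented names; only the SMCCC return values (SUCCESS = 0,
NOT_SUPPORTED = -1, NOT_REQUIRED = -2) come from the SMCCC specification.
With the host always mitigated, a guest's ARM_SMCCC_ARCH_WORKAROUND_2 query
can be answered from static, system-wide state, so nothing needs to be
saved or restored per vCPU on guest entry/exit:

/* Hypothetical sketch: answering ARCH_WORKAROUND_2 from static state. */

enum wa2_state {
	WA2_UNAFFECTED,		/* CPU not vulnerable; nothing to report */
	WA2_MITIGATED,		/* mitigation permanently on, host and guest */
};

/* Assumed helper returning the host's (fixed) mitigation state. */
extern enum wa2_state wa2_get_state(void);

/* Return values from the SMCCC specification. */
#define SMCCC_RET_SUCCESS	 0L
#define SMCCC_RET_NOT_SUPPORTED	-1L
#define SMCCC_RET_NOT_REQUIRED	-2L

/*
 * Answer a guest's ARCH_WORKAROUND_2 query. Since the state can no
 * longer change at runtime, there is no per-vCPU flag to consult and
 * nothing to toggle on guest entry/exit, which is what allows the
 * accessors and hyp_map_aux_data() below to be deleted.
 */
static long handle_wa2_query(void)
{
	switch (wa2_get_state()) {
	case WA2_UNAFFECTED:
		return SMCCC_RET_NOT_REQUIRED;	/* guest is unaffected */
	case WA2_MITIGATED:
		return SMCCC_RET_SUCCESS;	/* always-on mitigation */
	}
	return SMCCC_RET_NOT_SUPPORTED;
}

The diff below deletes exactly the pieces this makes redundant: the
per-vCPU flag and its accessors, and the hypervisor mapping of the per-CPU
callback state.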
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h      |  3 ---
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h  | 14 --------------
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h      | 17 -----------------
3 files changed, 0 insertions, 34 deletions
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index e9378cc8049d..abe02cf66880 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -9,9 +9,6 @@
#include <asm/virt.h>
-#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
-#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
#define ARM_EXIT_WITH_SERROR_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 49a55be2b9a2..96eccb107ec2 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
- bool flag)
-{
- if (flag)
- vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
- else
- vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 873e12430ac7..36606ef9e435 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -526,23 +526,6 @@ static inline int kvm_map_vectors(void)
}
#endif
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
- int cpu, err;
-
- for_each_possible_cpu(cpu) {
- u64 *ptr;
-
- ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
- err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
- if (err)
- return err;
- }
- return 0;
-}
-
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
/*