Diffstat (limited to 'arch/arm64/include/asm/kvm_host.h')
 arch/arm64/include/asm/kvm_host.h | 22 ++++------------------
 1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e21d4a01372f..f81151ad3d3c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -97,17 +97,6 @@ struct kvm_arch {
 	bool return_nisv_io_abort_to_user;
 };
 
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-	int nobjs;
-	void *objects[KVM_NR_MEM_OBJS];
-};
-
 struct kvm_vcpu_fault_info {
 	u32 esr_el2;		/* Hyp Syndrom Register */
 	u64 far_el2;		/* Hyp Fault Address Register */
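
This hunk drops the arm64-private preallocated object cache, presumably in favour of an equivalent definition in common KVM code. The idea the removed struct implemented is worth spelling out: the fault path must not fail an allocation, so the cache is topped up ahead of time from a context where failure is tolerable, and the fault handler only pops preallocated objects. A minimal compilable sketch of that pattern follows; the helper names and the use of calloc() are illustrative stand-ins, not the kernel's actual API.

	#include <stdlib.h>

	#define NR_MEM_OBJS 40

	struct mmu_memory_cache {
		int nobjs;
		void *objects[NR_MEM_OBJS];
	};

	/* Fill the cache where an allocation failure can still be handled. */
	static int mmu_cache_topup(struct mmu_memory_cache *mc, int min)
	{
		while (mc->nobjs < min) {
			void *obj = calloc(1, 4096); /* stands in for a page allocation */
			if (!obj)
				return -1;	/* caller may retry; the fault path cannot */
			mc->objects[mc->nobjs++] = obj;
		}
		return 0;
	}

	/* Consume on the fault path, where allocation must not fail. */
	static void *mmu_cache_alloc(struct mmu_memory_cache *mc)
	{
		return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
	}
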
@@ -486,18 +475,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
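
Both parts of this hunk follow the same pattern: since common KVM already embeds a pointer to the vcpu's kvm_run structure in struct kvm_vcpu (vcpu->run), passing run as a separate argument to exit and MMIO handlers was redundant, and the handlers can derive it themselves. A minimal sketch of what the new single-argument form looks like inside a handler; the struct definitions are stand-ins so the sketch compiles outside the kernel tree, and the body is illustrative only.

	/* Stand-ins for the real kernel types, just so the sketch compiles. */
	struct kvm_run {
		unsigned int exit_reason;
	};

	struct kvm_vcpu {
		struct kvm_run *run;	/* common KVM keeps this pointer in each vcpu */
	};

	/* After the change: run comes from vcpu, not from a second parameter. */
	static int handle_exit_sketch(struct kvm_vcpu *vcpu, int exception_index)
	{
		struct kvm_run *run = vcpu->run;

		run->exit_reason = exception_index;	/* placeholder for real dispatch */
		return 0;
	}

Callers are simplified the same way: a call site that previously read handle_exit(vcpu, vcpu->run, idx) becomes handle_exit(vcpu, idx), with no change in behaviour.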