path: root/arch/s390/boot/head_kdump.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 kdump lowlevel functions (new kernel)
 *
 * Copyright IBM Corp. 2011
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <asm/sigp.h>

#define DATAMOVER_ADDR	0x4000
#define COPY_PAGE_ADDR	0x6000
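#
# Both are fixed scratch addresses in low memory: the data mover code below is
# copied to DATAMOVER_ADDR, and its copy loop uses the 256-byte buffer at
# COPY_PAGE_ADDR as temporary storage while it exchanges memory between the
# old kernel and the kdump reserved area.
#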

#ifdef CONFIG_CRASH_DUMP

#
# kdump entry (new kernel - not yet relocated)
#
# Note: This code has to be position independent
#

.align 2
.Lep_startup_kdump:
	lhi	%r1,2				# mode 2 = esame (dump)
	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE	# Switch to esame mode
	sam64					# Switch to 64 bit addressing
	basr	%r13,0
.Lbase:
	larl	%r2,.Lbase_addr			# Check if we have already
	lg	%r2,0(%r2)			# been relocated:
	clgr	%r2,%r13			#
	jne	.Lrelocate			# No:  Start data mover
	lghi	%r2,0				# Yes: Start kdump kernel
	brasl	%r14,startup_kdump_relocated

.Lrelocate:
	larl	%r4,startup
	lg	%r2,0x418(%r4)			# Get kdump base
	lg	%r3,0x420(%r4)			# Get kdump size
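# Note: the 0x418/0x420 offsets from startup presumably correspond to the
# OLDMEM_BASE/OLDMEM_SIZE parameter area fields (assumption; the authoritative
# offsets are defined in asm/setup.h).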

	larl	%r10,.Lcopy_start		# Source of data mover
	lghi	%r8,DATAMOVER_ADDR		# Target of data mover
	mvc	0(256,%r8),0(%r10)		# Copy data mover code

	agr	%r8,%r2				# Copy data mover to
	mvc	0(256,%r8),0(%r10)		# reserved mem
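# Note: the second copy (into the reserved area) presumably ensures that when
# the swap loop below exchanges the page at DATAMOVER_ADDR, it is replaced by
# an identical copy of itself, so the data mover keeps executing correctly
# while its own page is being swapped.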

	lghi	%r14,DATAMOVER_ADDR		# Jump to copied data mover
	basr	%r14,%r14
.Lbase_addr:
	.quad	.Lbase

#
# kdump data mover code (runs at address DATAMOVER_ADDR)
#
# r2: kdump base address
# r3: kdump size
#
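# The data mover exchanges the first <kdump size> bytes of memory with the
# kdump reserved area, 256 bytes per iteration, using the buffer at
# COPY_PAGE_ADDR as temporary storage.  Presumably a swap (rather than a plain
# copy) is used so that the old kernel's low memory is preserved in the
# reserved area while the new kernel is moved to address 0.
#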
.Lcopy_start:
	basr	%r13,0				# Base
0:
	lgr	%r11,%r2			# Save kdump base address
	lgr	%r12,%r2
	agr	%r12,%r3			# Compute kdump end address

	lghi	%r5,0
	lghi	%r10,COPY_PAGE_ADDR		# Load copy page address
1:
	mvc	0(256,%r10),0(%r5)		# Copy old kernel to tmp
	mvc	0(256,%r5),0(%r11)		# Copy new kernel to old
	mvc	0(256,%r11),0(%r10)		# Copy tmp to new
	aghi	%r11,256
	aghi	%r5,256
	clgr	%r11,%r12
	jl	1b

	lg	%r14,.Lstartup_kdump-0b(%r13)
	basr	%r14,%r14			# Start relocated kernel
.Lstartup_kdump:
	.long	0x00000000,0x00000000 + startup_kdump_relocated
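# (.Lstartup_kdump holds the 8-byte address of startup_kdump_relocated as two
#  32-bit words; the high word is zero)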
.Lcopy_end:

#
# Startup of kdump (relocated new kernel)
#
.align 2
startup_kdump_relocated:
	basr	%r13,0
0:	lpswe	.Lrestart_psw-0b(%r13)		# Start new kernel...
.align	8
.Lrestart_psw:
	.quad	0x0000000080000000,0x0000000000000000 + startup
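# Note: this is a 16-byte (lpswe-format) PSW: the first doubleword is the PSW
# mask (0x0000000080000000 presumably selects 31-bit addressing mode for the
# entry into startup), the second doubleword is the instruction address.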
#else
.align 2
.Lep_startup_kdump:
	larl	%r13,startup_kdump_crash
	lpswe	0(%r13)
.align 8
startup_kdump_crash:
	.quad	0x0002000080000000,0x0000000000000000 + startup_kdump_crash
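# Note: the 0x0002 in the PSW mask presumably sets the wait-state bit, making
# this a disabled wait PSW that points at itself and stops the CPU when kdump
# support is not configured.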
#endif /* CONFIG_CRASH_DUMP */
s="w"> (nr == BP_VECTOR) || (nr == OF_VECTOR); } static inline bool is_protmode(struct kvm_vcpu *vcpu) { return kvm_read_cr0_bits(vcpu, X86_CR0_PE); } static inline int is_long_mode(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 return vcpu->arch.efer & EFER_LMA; #else return 0; #endif } static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) { int cs_db, cs_l; if (!is_long_mode(vcpu)) return false; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); return cs_l; } static inline bool is_la57_mode(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 return (vcpu->arch.efer & EFER_LMA) && kvm_read_cr4_bits(vcpu, X86_CR4_LA57); #else return 0; #endif } static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) { return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; } static inline int is_pae(struct kvm_vcpu *vcpu) { return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); } static inline int is_pse(struct kvm_vcpu *vcpu) { return kvm_read_cr4_bits(vcpu, X86_CR4_PSE); } static inline int is_paging(struct kvm_vcpu *vcpu) { return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG)); } static inline u32 bit(int bitno) { return 1 << (bitno & 31); } static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu) { return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48; } static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt) { return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48; } static inline u64 get_canonical(u64 la, u8 vaddr_bits) { return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits); } static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la; #else return false; #endif } static inline bool emul_is_noncanonical_address(u64 la, struct x86_emulate_ctxt *ctxt) { #ifdef CONFIG_X86_64 return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la; #else return false; #endif } static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access) { /* * If this is a shadow nested page table, the "GVA" is * actually a nGPA. */ vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK; vcpu->arch.access = access; vcpu->arch.mmio_gfn = gfn; vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation; } static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu) { return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation; } /* * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we * clear all mmio cache info. */ #define MMIO_GVA_ANY (~(gva_t)0) static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva) { if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) return; vcpu->arch.mmio_gva = 0; } static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) { if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK)) return true; return false; } static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) { if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) return true; return false; } static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, enum kvm_reg reg) { unsigned long val = kvm_register_read(vcpu, reg); return is_64_bit_mode(vcpu) ? 
val : (u32)val; } static inline void kvm_register_writel(struct kvm_vcpu *vcpu, enum kvm_reg reg, unsigned long val) { if (!is_64_bit_mode(vcpu)) val = (u32)val; return kvm_register_write(vcpu, reg, val); } static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) { return !(kvm->arch.disabled_quirks & quirk); } void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); void kvm_set_pending_timer(struct kvm_vcpu *vcpu); int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); u64 get_kvmclock_ns(struct kvm *kvm); int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu); u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data); int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data); int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num); bool kvm_vector_hashing_enabled(void); #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \ | XFEATURE_MASK_PKRU) extern u64 host_xcr0; extern u64 kvm_supported_xcr0(void); extern unsigned int min_timer_period_us; extern unsigned int lapic_timer_advance_ns; extern struct static_key kvm_no_apic_vcpu; static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); } /* Same "calling convention" as do_div: * - divide (n << 32) by base * - put result in n * - return remainder */ #define do_shl32_div32(n, base) \ ({ \ u32 __quot, __rem; \ asm("divl %2" : "=a" (__quot), "=d" (__rem) \ : "rm" (base), "0" (0), "1" ((u32) n)); \ n = __quot; \ __rem; \ }) static inline bool kvm_mwait_in_guest(void) { unsigned int eax, ebx, ecx, edx; if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT)) return false; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: /* All AMD CPUs have a working MWAIT implementation */ return </