From 7e4372028929e43a1f986438572a533b728dd266 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:16 +0200 Subject: x86/paravirt: Make paravirt_patch_call() and paravirt_patch_jmp() static paravirt_patch_call() and paravirt_patch_jmp() are used in paravirt.c only. Convert them to static. Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-6-jgross@suse.com --- arch/x86/kernel/paravirt.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index afdb303285f8..1abdbde35049 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -81,10 +81,10 @@ struct branch { u32 delta; } __attribute__((packed)); -unsigned paravirt_patch_call(void *insnbuf, - const void *target, u16 tgt_clobbers, - unsigned long addr, u16 site_clobbers, - unsigned len) +static unsigned paravirt_patch_call(void *insnbuf, + const void *target, u16 tgt_clobbers, + unsigned long addr, u16 site_clobbers, + unsigned len) { struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); @@ -103,8 +103,8 @@ unsigned paravirt_patch_call(void *insnbuf, return 5; } -unsigned paravirt_patch_jmp(void *insnbuf, const void *target, - unsigned long addr, unsigned len) +static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, + unsigned long addr, unsigned len) { struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); -- cgit v1.2.3 From abc745f85c1193d2a052addf0031d59b4436c246 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:17 +0200 Subject: x86/paravirt: Remove clobbers parameter from paravirt patch functions The clobbers parameter from paravirt_patch_default() et al isn't used any longer. Remove it. 
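For orientation, a condensed sketch of what these two helpers do once the clobbers arguments are gone: a patch site is rewritten into a five-byte rel32 call or jmp to the pv op's target function. This is illustrative only; the opcode constants and the length check are spelled out here for clarity rather than quoted from the hunks above and below.

/*
 * Illustrative sketch, not part of the patches in this series.
 */
struct branch {
	u8  opcode;		/* 0xe8 = call rel32, 0xe9 = jmp rel32 */
	u32 delta;		/* target - (site address + 5) */
} __attribute__((packed));

static unsigned patch_call_sketch(void *insnbuf, const void *target,
				  unsigned long addr, unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr + 5);

	if (len < 5)
		return len;	/* site too small, leave it untouched */

	b->opcode = 0xe8;	/* emit a call; a jmp variant would use 0xe9 */
	b->delta  = delta;

	return 5;		/* five bytes were patched */
}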
Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-7-jgross@suse.com --- arch/x86/kernel/alternative.c | 2 +- arch/x86/kernel/paravirt.c | 14 +++++--------- arch/x86/kernel/paravirt_patch_32.c | 5 ++--- arch/x86/kernel/paravirt_patch_64.c | 5 ++--- arch/x86/kernel/vsmp_64.c | 6 +++--- 5 files changed, 13 insertions(+), 19 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index b9d5e7c9ef43..9e755e1cfe92 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -594,7 +594,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ memcpy(insnbuf, p->instr, p->len); - used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, + used = pv_init_ops.patch(p->instrtype, insnbuf, (unsigned long)p->instr, p->len); BUG_ON(used > p->len); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1abdbde35049..287d34513f6a 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -81,10 +81,8 @@ struct branch { u32 delta; } __attribute__((packed)); -static unsigned paravirt_patch_call(void *insnbuf, - const void *target, u16 tgt_clobbers, - unsigned long addr, u16 site_clobbers, - unsigned len) +static unsigned paravirt_patch_call(void *insnbuf, const void *target, + unsigned long addr, unsigned len) { struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); @@ -149,7 +147,7 @@ static void *get_call_destination(u8 type) return *((void **)&tmpl + type); } -unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, +unsigned paravirt_patch_default(u8 type, void *insnbuf, unsigned long addr, unsigned len) { void *opfunc = get_call_destination(type); @@ -172,10 +170,8 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, /* If operation requires a jmp, then jmp */ ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len); else - /* Otherwise call the function; assume target could - clobber any caller-save reg */ - ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY, - addr, clobbers, len); + /* Otherwise call the function. 
*/ + ret = paravirt_patch_call(insnbuf, opfunc, addr, len); return ret; } diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 758e69d72ebf..e5c3a438149e 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -30,8 +30,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) extern bool pv_is_native_spin_unlock(void); extern bool pv_is_native_vcpu_is_preempted(void); -unsigned native_patch(u8 type, u16 clobbers, void *ibuf, - unsigned long addr, unsigned len) +unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { const unsigned char *start, *end; unsigned ret; @@ -70,7 +69,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, default: patch_default: __maybe_unused - ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); + ret = paravirt_patch_default(type, ibuf, addr, len); break; patch_site: diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 9cb98f7b07c9..835f1985a115 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -38,8 +38,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) extern bool pv_is_native_spin_unlock(void); extern bool pv_is_native_vcpu_is_preempted(void); -unsigned native_patch(u8 type, u16 clobbers, void *ibuf, - unsigned long addr, unsigned len) +unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { const unsigned char *start, *end; unsigned ret; @@ -80,7 +79,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, default: patch_default: __maybe_unused - ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); + ret = paravirt_patch_default(type, ibuf, addr, len); break; patch_site: diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 44685fb2a192..f194e5e1e95c 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -69,7 +69,7 @@ asmlinkage __visible void vsmp_irq_enable(void) } PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable); -static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, +static unsigned __init vsmp_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { switch (type) { @@ -77,9 +77,9 @@ static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, case PARAVIRT_PATCH(pv_irq_ops.irq_disable): case PARAVIRT_PATCH(pv_irq_ops.save_fl): case PARAVIRT_PATCH(pv_irq_ops.restore_fl): - return paravirt_patch_default(type, clobbers, ibuf, addr, len); + return paravirt_patch_default(type, ibuf, addr, len); default: - return native_patch(type, clobbers, ibuf, addr, len); + return native_patch(type, ibuf, addr, len); } } -- cgit v1.2.3 From 5c83511bdb9832c86be20fb86b783356e2f58062 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:19 +0200 Subject: x86/paravirt: Use a single ops structure Instead of using six globally visible paravirt ops structures combine them in a single structure, keeping the original structures as sub-structures. This avoids the need to assemble struct paravirt_patch_template at runtime on the stack each time apply_paravirt() is being called (i.e. when loading a module). 
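The practical payoff shows up in paravirt_patch_default(): the lookup no longer assembles six ops structures into a temporary template on the stack, but indexes the single pv_ops instance directly. A condensed before/after sketch, with the field list abbreviated (the full hunks follow below):

/* Before: six global ops structs copied into a temporary template on
 * every patch lookup, i.e. whenever apply_paravirt() runs. */
static void *get_call_destination(u8 type)
{
	struct paravirt_patch_template tmpl = {
		.pv_init_ops = pv_init_ops,
		.pv_time_ops = pv_time_ops,
		/* ... pv_cpu_ops, pv_irq_ops, pv_mmu_ops, pv_lock_ops ... */
	};
	return *((void **)&tmpl + type);
}

/* After: one global instance, indexed directly. */
extern struct paravirt_patch_template pv_ops;

static void *patch_destination_sketch(u8 type)
{
	return *((void **)&pv_ops + type);
}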
[ tglx: Made the struct and the initializer tabular for readability sake ] Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-9-jgross@suse.com --- arch/x86/kernel/alternative.c | 2 +- arch/x86/kernel/asm-offsets.c | 12 +- arch/x86/kernel/asm-offsets_64.c | 7 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpu/vmware.c | 4 +- arch/x86/kernel/kvm.c | 19 +-- arch/x86/kernel/kvmclock.c | 4 +- arch/x86/kernel/paravirt-spinlocks.c | 15 +- arch/x86/kernel/paravirt.c | 290 +++++++++++++++++------------------ arch/x86/kernel/paravirt_patch_32.c | 48 +++--- arch/x86/kernel/paravirt_patch_64.c | 56 +++---- arch/x86/kernel/tsc.c | 2 +- arch/x86/kernel/vsmp_64.c | 18 +-- 13 files changed, 230 insertions(+), 249 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 9e755e1cfe92..22e4da4ae8b9 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -594,7 +594,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ memcpy(insnbuf, p->instr, p->len); - used = pv_init_ops.patch(p->instrtype, insnbuf, + used = pv_ops.init.patch(p->instrtype, insnbuf, (unsigned long)p->instr, p->len); BUG_ON(used > p->len); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 01de31db300d..46d14498e214 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -66,13 +66,11 @@ void common(void) { #ifdef CONFIG_PARAVIRT BLANK(); - OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); - OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); - OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); - OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); - OFFSET(PV_CPU_iret, pv_cpu_ops, iret); - OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); - OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); + OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable); + OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable); + OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret); + OFFSET(PV_CPU_read_cr0, paravirt_patch_template, cpu.read_cr0); + OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2); #endif #ifdef CONFIG_XEN diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 3b9405e7ba2b..3384b03e717f 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -21,10 +21,11 @@ static char syscalls_ia32[] = { int main(void) { #ifdef CONFIG_PARAVIRT - OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); - OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); + OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template, + cpu.usergs_sysret64); + OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs); #ifdef CONFIG_DEBUG_ENTRY - OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl); + OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl); #endif BLANK(); #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 44c4ef3d989b..c4da9c23a96c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1243,7 +1243,7 @@ static void generic_identify(struct cpuinfo_x86 *c) # ifdef CONFIG_PARAVIRT do 
{ extern void native_iret(void); - if (pv_cpu_ops.iret == native_iret) + if (pv_ops.cpu.iret == native_iret) set_cpu_bug(c, X86_BUG_ESPFIX); } while (0); # else diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 8e005329648b..d9ab49bed8af 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -97,14 +97,14 @@ static void __init vmware_sched_clock_setup(void) d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul, d->cyc2ns_shift); - pv_time_ops.sched_clock = vmware_sched_clock; + pv_ops.time.sched_clock = vmware_sched_clock; pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset); } static void __init vmware_paravirt_ops_setup(void) { pv_info.name = "VMware hypervisor"; - pv_cpu_ops.io_delay = paravirt_nop; + pv_ops.cpu.io_delay = paravirt_nop; if (vmware_tsc_khz && vmw_sched_clock) vmware_sched_clock_setup(); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d9b71924c23c..ba4bfb7f6a36 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -283,7 +283,7 @@ static void __init paravirt_ops_setup(void) pv_info.name = "KVM"; if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) - pv_cpu_ops.io_delay = kvm_io_delay; + pv_ops.cpu.io_delay = kvm_io_delay; #ifdef CONFIG_X86_IO_APIC no_timer_check = 1; @@ -632,14 +632,14 @@ static void __init kvm_guest_init(void) if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { has_steal_clock = 1; - pv_time_ops.steal_clock = kvm_steal_clock; + pv_ops.time.steal_clock = kvm_steal_clock; } if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && !kvm_para_has_hint(KVM_HINTS_REALTIME) && kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { - pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; - pv_mmu_ops.tlb_remove_table = tlb_remove_table; + pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; + pv_ops.mmu.tlb_remove_table = tlb_remove_table; } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) @@ -850,13 +850,14 @@ void __init kvm_spinlock_init(void) return; __pv_init_lock_hash(); - pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; - pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock); - pv_lock_ops.wait = kvm_wait; - pv_lock_ops.kick = kvm_kick_cpu; + pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; + pv_ops.lock.queued_spin_unlock = + PV_CALLEE_SAVE(__pv_queued_spin_unlock); + pv_ops.lock.wait = kvm_wait; + pv_ops.lock.kick = kvm_kick_cpu; if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { - pv_lock_ops.vcpu_is_preempted = + pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(__kvm_vcpu_is_preempted); } } diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 1e6764648af3..a36b93a722a2 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -116,13 +116,13 @@ static u64 kvm_sched_clock_read(void) static inline void kvm_sched_clock_init(bool stable) { if (!stable) { - pv_time_ops.sched_clock = kvm_clock_read; + pv_ops.time.sched_clock = kvm_clock_read; clear_sched_clock_stable(); return; } kvm_sched_clock_offset = kvm_clock_read(); - pv_time_ops.sched_clock = kvm_sched_clock_read; + pv_ops.time.sched_clock = kvm_sched_clock_read; pr_info("kvm-clock: using sched offset of %llu cycles", kvm_sched_clock_offset); diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 71f2d1125ec0..4f75d0cf6305 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -17,7 +17,7 @@ 
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock); bool pv_is_native_spin_unlock(void) { - return pv_lock_ops.queued_spin_unlock.func == + return pv_ops.lock.queued_spin_unlock.func == __raw_callee_save___native_queued_spin_unlock; } @@ -29,17 +29,6 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted); bool pv_is_native_vcpu_is_preempted(void) { - return pv_lock_ops.vcpu_is_preempted.func == + return pv_ops.lock.vcpu_is_preempted.func == __raw_callee_save___native_vcpu_is_preempted; } - -struct pv_lock_ops pv_lock_ops = { -#ifdef CONFIG_SMP - .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath, - .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock), - .wait = paravirt_nop, - .kick = paravirt_nop, - .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted), -#endif /* SMP */ -}; -EXPORT_SYMBOL(pv_lock_ops); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 287d34513f6a..0ca0576c88ff 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -128,29 +128,14 @@ void __init native_pv_lock_init(void) static_branch_disable(&virt_spin_lock_key); } -/* - * Neat trick to map patch type back to the call within the - * corresponding structure. - */ -static void *get_call_destination(u8 type) -{ - struct paravirt_patch_template tmpl = { - .pv_init_ops = pv_init_ops, - .pv_time_ops = pv_time_ops, - .pv_cpu_ops = pv_cpu_ops, - .pv_irq_ops = pv_irq_ops, - .pv_mmu_ops = pv_mmu_ops, -#ifdef CONFIG_PARAVIRT_SPINLOCKS - .pv_lock_ops = pv_lock_ops, -#endif - }; - return *((void **)&tmpl + type); -} - unsigned paravirt_patch_default(u8 type, void *insnbuf, unsigned long addr, unsigned len) { - void *opfunc = get_call_destination(type); + /* + * Neat trick to map patch type back to the call within the + * corresponding structure. 
+ */ + void *opfunc = *((void **)&pv_ops + type); unsigned ret; if (opfunc == NULL) @@ -165,8 +150,8 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf, else if (opfunc == _paravirt_ident_64) ret = paravirt_patch_ident_64(insnbuf, len); - else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || - type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64)) + else if (type == PARAVIRT_PATCH(cpu.iret) || + type == PARAVIRT_PATCH(cpu.usergs_sysret64)) /* If operation requires a jmp, then jmp */ ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len); else @@ -316,77 +301,6 @@ struct pv_info pv_info = { #endif }; -struct pv_init_ops pv_init_ops = { - .patch = native_patch, -}; - -struct pv_time_ops pv_time_ops = { - .sched_clock = native_sched_clock, - .steal_clock = native_steal_clock, -}; - -__visible struct pv_irq_ops pv_irq_ops = { - .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), - .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), - .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), - .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), - .safe_halt = native_safe_halt, - .halt = native_halt, -}; - -__visible struct pv_cpu_ops pv_cpu_ops = { - .cpuid = native_cpuid, - .get_debugreg = native_get_debugreg, - .set_debugreg = native_set_debugreg, - .read_cr0 = native_read_cr0, - .write_cr0 = native_write_cr0, - .write_cr4 = native_write_cr4, -#ifdef CONFIG_X86_64 - .read_cr8 = native_read_cr8, - .write_cr8 = native_write_cr8, -#endif - .wbinvd = native_wbinvd, - .read_msr = native_read_msr, - .write_msr = native_write_msr, - .read_msr_safe = native_read_msr_safe, - .write_msr_safe = native_write_msr_safe, - .read_pmc = native_read_pmc, - .load_tr_desc = native_load_tr_desc, - .set_ldt = native_set_ldt, - .load_gdt = native_load_gdt, - .load_idt = native_load_idt, - .store_tr = native_store_tr, - .load_tls = native_load_tls, -#ifdef CONFIG_X86_64 - .load_gs_index = native_load_gs_index, -#endif - .write_ldt_entry = native_write_ldt_entry, - .write_gdt_entry = native_write_gdt_entry, - .write_idt_entry = native_write_idt_entry, - - .alloc_ldt = paravirt_nop, - .free_ldt = paravirt_nop, - - .load_sp0 = native_load_sp0, - -#ifdef CONFIG_X86_64 - .usergs_sysret64 = native_usergs_sysret64, -#endif - .iret = native_iret, - .swapgs = native_swapgs, - - .set_iopl_mask = native_set_iopl_mask, - .io_delay = native_io_delay, - - .start_context_switch = paravirt_nop, - .end_context_switch = paravirt_nop, -}; - -/* At this point, native_get/set_debugreg has real function entries */ -NOKPROBE_SYMBOL(native_get_debugreg); -NOKPROBE_SYMBOL(native_set_debugreg); -NOKPROBE_SYMBOL(native_load_idt); - #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) /* 32-bit pagetable entries */ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) @@ -395,85 +309,163 @@ NOKPROBE_SYMBOL(native_load_idt); #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) #endif -struct pv_mmu_ops pv_mmu_ops __ro_after_init = { - - .read_cr2 = native_read_cr2, - .write_cr2 = native_write_cr2, - .read_cr3 = __native_read_cr3, - .write_cr3 = native_write_cr3, - - .flush_tlb_user = native_flush_tlb, - .flush_tlb_kernel = native_flush_tlb_global, - .flush_tlb_one_user = native_flush_tlb_one_user, - .flush_tlb_others = native_flush_tlb_others, - .tlb_remove_table = (void (*)(struct mmu_gather *, void *))tlb_remove_page, - - .pgd_alloc = __paravirt_pgd_alloc, - .pgd_free = paravirt_nop, +struct paravirt_patch_template pv_ops = { + /* Init ops. */ + .init.patch = native_patch, + + /* Time ops. 
*/ + .time.sched_clock = native_sched_clock, + .time.steal_clock = native_steal_clock, + + /* Cpu ops. */ + .cpu.cpuid = native_cpuid, + .cpu.get_debugreg = native_get_debugreg, + .cpu.set_debugreg = native_set_debugreg, + .cpu.read_cr0 = native_read_cr0, + .cpu.write_cr0 = native_write_cr0, + .cpu.write_cr4 = native_write_cr4, +#ifdef CONFIG_X86_64 + .cpu.read_cr8 = native_read_cr8, + .cpu.write_cr8 = native_write_cr8, +#endif + .cpu.wbinvd = native_wbinvd, + .cpu.read_msr = native_read_msr, + .cpu.write_msr = native_write_msr, + .cpu.read_msr_safe = native_read_msr_safe, + .cpu.write_msr_safe = native_write_msr_safe, + .cpu.read_pmc = native_read_pmc, + .cpu.load_tr_desc = native_load_tr_desc, + .cpu.set_ldt = native_set_ldt, + .cpu.load_gdt = native_load_gdt, + .cpu.load_idt = native_load_idt, + .cpu.store_tr = native_store_tr, + .cpu.load_tls = native_load_tls, +#ifdef CONFIG_X86_64 + .cpu.load_gs_index = native_load_gs_index, +#endif + .cpu.write_ldt_entry = native_write_ldt_entry, + .cpu.write_gdt_entry = native_write_gdt_entry, + .cpu.write_idt_entry = native_write_idt_entry, - .alloc_pte = paravirt_nop, - .alloc_pmd = paravirt_nop, - .alloc_pud = paravirt_nop, - .alloc_p4d = paravirt_nop, - .release_pte = paravirt_nop, - .release_pmd = paravirt_nop, - .release_pud = paravirt_nop, - .release_p4d = paravirt_nop, + .cpu.alloc_ldt = paravirt_nop, + .cpu.free_ldt = paravirt_nop, - .set_pte = native_set_pte, - .set_pte_at = native_set_pte_at, - .set_pmd = native_set_pmd, + .cpu.load_sp0 = native_load_sp0, - .ptep_modify_prot_start = __ptep_modify_prot_start, - .ptep_modify_prot_commit = __ptep_modify_prot_commit, +#ifdef CONFIG_X86_64 + .cpu.usergs_sysret64 = native_usergs_sysret64, +#endif + .cpu.iret = native_iret, + .cpu.swapgs = native_swapgs, + + .cpu.set_iopl_mask = native_set_iopl_mask, + .cpu.io_delay = native_io_delay, + + .cpu.start_context_switch = paravirt_nop, + .cpu.end_context_switch = paravirt_nop, + + /* Irq ops. */ + .irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), + .irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), + .irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), + .irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), + .irq.safe_halt = native_safe_halt, + .irq.halt = native_halt, + + /* Mmu ops. 
*/ + .mmu.read_cr2 = native_read_cr2, + .mmu.write_cr2 = native_write_cr2, + .mmu.read_cr3 = __native_read_cr3, + .mmu.write_cr3 = native_write_cr3, + + .mmu.flush_tlb_user = native_flush_tlb, + .mmu.flush_tlb_kernel = native_flush_tlb_global, + .mmu.flush_tlb_one_user = native_flush_tlb_one_user, + .mmu.flush_tlb_others = native_flush_tlb_others, + .mmu.tlb_remove_table = + (void (*)(struct mmu_gather *, void *))tlb_remove_page, + + .mmu.pgd_alloc = __paravirt_pgd_alloc, + .mmu.pgd_free = paravirt_nop, + + .mmu.alloc_pte = paravirt_nop, + .mmu.alloc_pmd = paravirt_nop, + .mmu.alloc_pud = paravirt_nop, + .mmu.alloc_p4d = paravirt_nop, + .mmu.release_pte = paravirt_nop, + .mmu.release_pmd = paravirt_nop, + .mmu.release_pud = paravirt_nop, + .mmu.release_p4d = paravirt_nop, + + .mmu.set_pte = native_set_pte, + .mmu.set_pte_at = native_set_pte_at, + .mmu.set_pmd = native_set_pmd, + + .mmu.ptep_modify_prot_start = __ptep_modify_prot_start, + .mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit, #if CONFIG_PGTABLE_LEVELS >= 3 #ifdef CONFIG_X86_PAE - .set_pte_atomic = native_set_pte_atomic, - .pte_clear = native_pte_clear, - .pmd_clear = native_pmd_clear, + .mmu.set_pte_atomic = native_set_pte_atomic, + .mmu.pte_clear = native_pte_clear, + .mmu.pmd_clear = native_pmd_clear, #endif - .set_pud = native_set_pud, + .mmu.set_pud = native_set_pud, - .pmd_val = PTE_IDENT, - .make_pmd = PTE_IDENT, + .mmu.pmd_val = PTE_IDENT, + .mmu.make_pmd = PTE_IDENT, #if CONFIG_PGTABLE_LEVELS >= 4 - .pud_val = PTE_IDENT, - .make_pud = PTE_IDENT, + .mmu.pud_val = PTE_IDENT, + .mmu.make_pud = PTE_IDENT, - .set_p4d = native_set_p4d, + .mmu.set_p4d = native_set_p4d, #if CONFIG_PGTABLE_LEVELS >= 5 - .p4d_val = PTE_IDENT, - .make_p4d = PTE_IDENT, + .mmu.p4d_val = PTE_IDENT, + .mmu.make_p4d = PTE_IDENT, - .set_pgd = native_set_pgd, + .mmu.set_pgd = native_set_pgd, #endif /* CONFIG_PGTABLE_LEVELS >= 5 */ #endif /* CONFIG_PGTABLE_LEVELS >= 4 */ #endif /* CONFIG_PGTABLE_LEVELS >= 3 */ - .pte_val = PTE_IDENT, - .pgd_val = PTE_IDENT, + .mmu.pte_val = PTE_IDENT, + .mmu.pgd_val = PTE_IDENT, - .make_pte = PTE_IDENT, - .make_pgd = PTE_IDENT, + .mmu.make_pte = PTE_IDENT, + .mmu.make_pgd = PTE_IDENT, - .dup_mmap = paravirt_nop, - .exit_mmap = paravirt_nop, - .activate_mm = paravirt_nop, + .mmu.dup_mmap = paravirt_nop, + .mmu.exit_mmap = paravirt_nop, + .mmu.activate_mm = paravirt_nop, - .lazy_mode = { - .enter = paravirt_nop, - .leave = paravirt_nop, - .flush = paravirt_nop, + .mmu.lazy_mode = { + .enter = paravirt_nop, + .leave = paravirt_nop, + .flush = paravirt_nop, }, - .set_fixmap = native_set_fixmap, + .mmu.set_fixmap = native_set_fixmap, + +#if defined(CONFIG_PARAVIRT_SPINLOCKS) + /* Lock ops. 
*/ +#ifdef CONFIG_SMP + .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath, + .lock.queued_spin_unlock = + PV_CALLEE_SAVE(__native_queued_spin_unlock), + .lock.wait = paravirt_nop, + .lock.kick = paravirt_nop, + .lock.vcpu_is_preempted = + PV_CALLEE_SAVE(__native_vcpu_is_preempted), +#endif /* SMP */ +#endif }; -EXPORT_SYMBOL_GPL(pv_time_ops); -EXPORT_SYMBOL (pv_cpu_ops); -EXPORT_SYMBOL (pv_mmu_ops); +/* At this point, native_get/set_debugreg has real function entries */ +NOKPROBE_SYMBOL(native_get_debugreg); +NOKPROBE_SYMBOL(native_set_debugreg); +NOKPROBE_SYMBOL(native_load_idt); + +EXPORT_SYMBOL_GPL(pv_ops); EXPORT_SYMBOL_GPL(pv_info); -EXPORT_SYMBOL (pv_irq_ops); diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index e5c3a438149e..026fa43e9261 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -1,18 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 #include -DEF_NATIVE(pv_irq_ops, irq_disable, "cli"); -DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); -DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf"); -DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax"); -DEF_NATIVE(pv_cpu_ops, iret, "iret"); -DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax"); -DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3"); -DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax"); +DEF_NATIVE(irq, irq_disable, "cli"); +DEF_NATIVE(irq, irq_enable, "sti"); +DEF_NATIVE(irq, restore_fl, "push %eax; popf"); +DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); +DEF_NATIVE(cpu, iret, "iret"); +DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); +DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); +DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); #if defined(CONFIG_PARAVIRT_SPINLOCKS) -DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)"); -DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax"); +DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); +DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); #endif unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) @@ -41,27 +41,27 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) end = end_##ops##_##x; \ goto patch_site switch (type) { - PATCH_SITE(pv_irq_ops, irq_disable); - PATCH_SITE(pv_irq_ops, irq_enable); - PATCH_SITE(pv_irq_ops, restore_fl); - PATCH_SITE(pv_irq_ops, save_fl); - PATCH_SITE(pv_cpu_ops, iret); - PATCH_SITE(pv_mmu_ops, read_cr2); - PATCH_SITE(pv_mmu_ops, read_cr3); - PATCH_SITE(pv_mmu_ops, write_cr3); + PATCH_SITE(irq, irq_disable); + PATCH_SITE(irq, irq_enable); + PATCH_SITE(irq, restore_fl); + PATCH_SITE(irq, save_fl); + PATCH_SITE(cpu, iret); + PATCH_SITE(mmu, read_cr2); + PATCH_SITE(mmu, read_cr3); + PATCH_SITE(mmu, write_cr3); #if defined(CONFIG_PARAVIRT_SPINLOCKS) - case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): + case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) { - start = start_pv_lock_ops_queued_spin_unlock; - end = end_pv_lock_ops_queued_spin_unlock; + start = start_lock_queued_spin_unlock; + end = end_lock_queued_spin_unlock; goto patch_site; } goto patch_default; - case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted): + case PARAVIRT_PATCH(lock.vcpu_is_preempted): if (pv_is_native_vcpu_is_preempted()) { - start = start_pv_lock_ops_vcpu_is_preempted; - end = end_pv_lock_ops_vcpu_is_preempted; + start = start_lock_vcpu_is_preempted; + end = end_lock_vcpu_is_preempted; goto patch_site; } goto patch_default; diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c 
index 835f1985a115..582e893728e8 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -3,24 +3,24 @@ #include #include -DEF_NATIVE(pv_irq_ops, irq_disable, "cli"); -DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); -DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq"); -DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); -DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); -DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); -DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); -DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); +DEF_NATIVE(irq, irq_disable, "cli"); +DEF_NATIVE(irq, irq_enable, "sti"); +DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq"); +DEF_NATIVE(irq, save_fl, "pushfq; popq %rax"); +DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax"); +DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax"); +DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3"); +DEF_NATIVE(cpu, wbinvd, "wbinvd"); -DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); -DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); +DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); +DEF_NATIVE(cpu, swapgs, "swapgs"); DEF_NATIVE(, mov32, "mov %edi, %eax"); DEF_NATIVE(, mov64, "mov %rdi, %rax"); #if defined(CONFIG_PARAVIRT_SPINLOCKS) -DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)"); -DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax"); +DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); +DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); #endif unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) @@ -49,29 +49,29 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) end = end_##ops##_##x; \ goto patch_site switch(type) { - PATCH_SITE(pv_irq_ops, restore_fl); - PATCH_SITE(pv_irq_ops, save_fl); - PATCH_SITE(pv_irq_ops, irq_enable); - PATCH_SITE(pv_irq_ops, irq_disable); - PATCH_SITE(pv_cpu_ops, usergs_sysret64); - PATCH_SITE(pv_cpu_ops, swapgs); - PATCH_SITE(pv_mmu_ops, read_cr2); - PATCH_SITE(pv_mmu_ops, read_cr3); - PATCH_SITE(pv_mmu_ops, write_cr3); - PATCH_SITE(pv_cpu_ops, wbinvd); + PATCH_SITE(irq, restore_fl); + PATCH_SITE(irq, save_fl); + PATCH_SITE(irq, irq_enable); + PATCH_SITE(irq, irq_disable); + PATCH_SITE(cpu, usergs_sysret64); + PATCH_SITE(cpu, swapgs); + PATCH_SITE(mmu, read_cr2); + PATCH_SITE(mmu, read_cr3); + PATCH_SITE(mmu, write_cr3); + PATCH_SITE(cpu, wbinvd); #if defined(CONFIG_PARAVIRT_SPINLOCKS) - case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): + case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) { - start = start_pv_lock_ops_queued_spin_unlock; - end = end_pv_lock_ops_queued_spin_unlock; + start = start_lock_queued_spin_unlock; + end = end_lock_queued_spin_unlock; goto patch_site; } goto patch_default; - case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted): + case PARAVIRT_PATCH(lock.vcpu_is_preempted): if (pv_is_native_vcpu_is_preempted()) { - start = start_pv_lock_ops_vcpu_is_preempted; - end = end_pv_lock_ops_vcpu_is_preempted; + start = start_lock_vcpu_is_preempted; + end = end_lock_vcpu_is_preempted; goto patch_site; } goto patch_default; diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 1463468ba9a0..9044aa5e2389 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -246,7 +246,7 @@ unsigned long long sched_clock(void) bool using_native_sched_clock(void) { - return pv_time_ops.sched_clock == native_sched_clock; + return pv_ops.time.sched_clock == native_sched_clock; } #else unsigned long long diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c 
index f194e5e1e95c..789918d78697 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -73,10 +73,10 @@ static unsigned __init vsmp_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { switch (type) { - case PARAVIRT_PATCH(pv_irq_ops.irq_enable): - case PARAVIRT_PATCH(pv_irq_ops.irq_disable): - case PARAVIRT_PATCH(pv_irq_ops.save_fl): - case PARAVIRT_PATCH(pv_irq_ops.restore_fl): + case PARAVIRT_PATCH(irq.irq_enable): + case PARAVIRT_PATCH(irq.irq_disable): + case PARAVIRT_PATCH(irq.save_fl): + case PARAVIRT_PATCH(irq.restore_fl): return paravirt_patch_default(type, ibuf, addr, len); default: return native_patch(type, ibuf, addr, len); @@ -111,11 +111,11 @@ static void __init set_vsmp_pv_ops(void) if (cap & ctl & (1 << 4)) { /* Setup irq ops and turn on vSMP IRQ fastpath handling */ - pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable); - pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable); - pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl); - pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl); - pv_init_ops.patch = vsmp_patch; + pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable); + pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable); + pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl); + pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl); + pv_ops.init.patch = vsmp_patch; ctl &= ~(1 << 4); } writel(ctl, address + 4); -- cgit v1.2.3 From 5def7a4cd5bef80f7c997cf37c09022fa8c9cd76 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:20 +0200 Subject: x86/paravirt: Remove unused paravirt bits The macros ENABLE_INTERRUPTS_SYSEXIT, GET_CR0_INTO_EAX and PARAVIRT_ADJUST_EXCEPTION_FRAME are used nowhere. Remove their definitions. Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-10-jgross@suse.com --- arch/x86/kernel/asm-offsets.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 46d14498e214..37e323f3d8c9 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -69,7 +69,6 @@ void common(void) { OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable); OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable); OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret); - OFFSET(PV_CPU_read_cr0, paravirt_patch_template, cpu.read_cr0); OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2); #endif -- cgit v1.2.3 From 40181646db45fb72f46563a2f3b792adc5380710 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:22 +0200 Subject: x86/paravirt: Move items in pv_info under PARAVIRT_XXL umbrella All items but name in pv_info are needed by Xen PV only. Define them with CONFIG_PARAVIRT_XXL set only. 
Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-12-jgross@suse.com --- arch/x86/kernel/paravirt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 0ca0576c88ff..ce0e28506942 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -293,12 +293,14 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) struct pv_info pv_info = { .name = "bare hardware", +#ifdef CONFIG_PARAVIRT_XXL .kernel_rpl = 0, .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ #ifdef CONFIG_X86_64 .extra_user_64bit_cs = __USER_CS, #endif +#endif }; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) -- cgit v1.2.3 From 9bad5658ea710f45e4ee68b88a01cfe1839d8b00 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:23 +0200 Subject: x86/paravirt: Move the Xen-only pv_cpu_ops under the PARAVIRT_XXL umbrella Most of the paravirt ops defined in pv_cpu_ops are for Xen PV guests only. Define them only if CONFIG_PARAVIRT_XXL is set. Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-13-jgross@suse.com --- arch/x86/kernel/asm-offsets.c | 2 ++ arch/x86/kernel/asm-offsets_64.c | 2 ++ arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/head_64.S | 2 ++ arch/x86/kernel/paravirt.c | 13 ++++++++++++- arch/x86/kernel/paravirt_patch_32.c | 4 ++++ arch/x86/kernel/paravirt_patch_64.c | 6 +++++- 7 files changed, 28 insertions(+), 3 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 37e323f3d8c9..0fe233d98d17 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -68,7 +68,9 @@ void common(void) { BLANK(); OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable); OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable); +#ifdef CONFIG_PARAVIRT_XXL OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret); +#endif OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2); #endif diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 3384b03e717f..2a15d420a84d 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -21,9 +21,11 @@ static char syscalls_ia32[] = { int main(void) { #ifdef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT_XXL OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template, cpu.usergs_sysret64); OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs); +#endif #ifdef CONFIG_DEBUG_ENTRY OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl); #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c4da9c23a96c..d45cd5e0fb11 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1240,7 +1240,7 @@ static void generic_identify(struct cpuinfo_x86 *c) * ESPFIX issue, we can change this. 
*/ #ifdef CONFIG_X86_32 -# ifdef CONFIG_PARAVIRT +# ifdef CONFIG_PARAVIRT_XXL do { extern void native_iret(void); if (pv_ops.cpu.iret == native_iret) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 15ebc2fc166e..a5bd72a0ee1a 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -31,6 +31,8 @@ #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg #else #define GET_CR2_INTO(reg) movq %cr2, reg +#endif +#ifndef CONFIG_PARAVIRT_XXL #define INTERRUPT_RETURN iretq #endif diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index ce0e28506942..1d5f40cc872a 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -101,6 +101,7 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target, return 5; } +#ifdef CONFIG_PARAVIRT_XXL static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, unsigned long addr, unsigned len) { @@ -119,6 +120,7 @@ static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, return 5; } +#endif DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); @@ -150,10 +152,12 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf, else if (opfunc == _paravirt_ident_64) ret = paravirt_patch_ident_64(insnbuf, len); +#ifdef CONFIG_PARAVIRT_XXL else if (type == PARAVIRT_PATCH(cpu.iret) || type == PARAVIRT_PATCH(cpu.usergs_sysret64)) /* If operation requires a jmp, then jmp */ ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len); +#endif else /* Otherwise call the function. */ ret = paravirt_patch_call(insnbuf, opfunc, addr, len); @@ -262,6 +266,7 @@ void paravirt_flush_lazy_mmu(void) preempt_enable(); } +#ifdef CONFIG_PARAVIRT_XXL void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -282,6 +287,7 @@ void paravirt_end_context_switch(struct task_struct *next) if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES)) arch_enter_lazy_mmu_mode(); } +#endif enum paravirt_lazy_mode paravirt_get_lazy_mode(void) { @@ -320,6 +326,9 @@ struct paravirt_patch_template pv_ops = { .time.steal_clock = native_steal_clock, /* Cpu ops. */ + .cpu.io_delay = native_io_delay, + +#ifdef CONFIG_PARAVIRT_XXL .cpu.cpuid = native_cpuid, .cpu.get_debugreg = native_get_debugreg, .cpu.set_debugreg = native_set_debugreg, @@ -361,10 +370,10 @@ struct paravirt_patch_template pv_ops = { .cpu.swapgs = native_swapgs, .cpu.set_iopl_mask = native_set_iopl_mask, - .cpu.io_delay = native_io_delay, .cpu.start_context_switch = paravirt_nop, .cpu.end_context_switch = paravirt_nop, +#endif /* CONFIG_PARAVIRT_XXL */ /* Irq ops. 
*/ .irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), @@ -464,10 +473,12 @@ struct paravirt_patch_template pv_ops = { #endif }; +#ifdef CONFIG_PARAVIRT_XXL /* At this point, native_get/set_debugreg has real function entries */ NOKPROBE_SYMBOL(native_get_debugreg); NOKPROBE_SYMBOL(native_set_debugreg); NOKPROBE_SYMBOL(native_load_idt); +#endif EXPORT_SYMBOL_GPL(pv_ops); EXPORT_SYMBOL_GPL(pv_info); diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 026fa43e9261..5a20aa56efc0 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -5,7 +5,9 @@ DEF_NATIVE(irq, irq_disable, "cli"); DEF_NATIVE(irq, irq_enable, "sti"); DEF_NATIVE(irq, restore_fl, "push %eax; popf"); DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); +#ifdef CONFIG_PARAVIRT_XXL DEF_NATIVE(cpu, iret, "iret"); +#endif DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); @@ -45,7 +47,9 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) PATCH_SITE(irq, irq_enable); PATCH_SITE(irq, restore_fl); PATCH_SITE(irq, save_fl); +#ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(cpu, iret); +#endif PATCH_SITE(mmu, read_cr2); PATCH_SITE(mmu, read_cr3); PATCH_SITE(mmu, write_cr3); diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 582e893728e8..461aba038ada 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -10,10 +10,12 @@ DEF_NATIVE(irq, save_fl, "pushfq; popq %rax"); DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3"); +#ifdef CONFIG_PARAVIRT_XXL DEF_NATIVE(cpu, wbinvd, "wbinvd"); DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); DEF_NATIVE(cpu, swapgs, "swapgs"); +#endif DEF_NATIVE(, mov32, "mov %edi, %eax"); DEF_NATIVE(, mov64, "mov %rdi, %rax"); @@ -53,12 +55,14 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) PATCH_SITE(irq, save_fl); PATCH_SITE(irq, irq_enable); PATCH_SITE(irq, irq_disable); +#ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(cpu, usergs_sysret64); PATCH_SITE(cpu, swapgs); + PATCH_SITE(cpu, wbinvd); +#endif PATCH_SITE(mmu, read_cr2); PATCH_SITE(mmu, read_cr3); PATCH_SITE(mmu, write_cr3); - PATCH_SITE(cpu, wbinvd); #if defined(CONFIG_PARAVIRT_SPINLOCKS) case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) { -- cgit v1.2.3 From 6da63eb241a05b0e676d68975e793c0521387141 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:24 +0200 Subject: x86/paravirt: Move the pv_irq_ops under the PARAVIRT_XXL umbrella All of the paravirt ops defined in pv_irq_ops are for Xen PV guests or VSMP only. Define them only if CONFIG_PARAVIRT_XXL is set. 
Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-14-jgross@suse.com --- arch/x86/kernel/asm-offsets.c | 2 +- arch/x86/kernel/asm-offsets_64.c | 2 +- arch/x86/kernel/paravirt.c | 2 +- arch/x86/kernel/paravirt_patch_32.c | 4 ++-- arch/x86/kernel/paravirt_patch_64.c | 4 +++- arch/x86/kernel/vsmp_64.c | 2 +- 6 files changed, 9 insertions(+), 7 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 0fe233d98d17..28e7572ff74d 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -66,9 +66,9 @@ void common(void) { #ifdef CONFIG_PARAVIRT BLANK(); +#ifdef CONFIG_PARAVIRT_XXL OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable); OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable); -#ifdef CONFIG_PARAVIRT_XXL OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret); #endif OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2); diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 2a15d420a84d..ddced33184b5 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -25,9 +25,9 @@ int main(void) OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template, cpu.usergs_sysret64); OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs); -#endif #ifdef CONFIG_DEBUG_ENTRY OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl); +#endif #endif BLANK(); #endif diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1d5f40cc872a..5e8226335eac 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -373,7 +373,6 @@ struct paravirt_patch_template pv_ops = { .cpu.start_context_switch = paravirt_nop, .cpu.end_context_switch = paravirt_nop, -#endif /* CONFIG_PARAVIRT_XXL */ /* Irq ops. */ .irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), @@ -382,6 +381,7 @@ struct paravirt_patch_template pv_ops = { .irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), .irq.safe_halt = native_safe_halt, .irq.halt = native_halt, +#endif /* CONFIG_PARAVIRT_XXL */ /* Mmu ops. 
*/ .mmu.read_cr2 = native_read_cr2, diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 5a20aa56efc0..1d44705c6528 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include +#ifdef CONFIG_PARAVIRT_XXL DEF_NATIVE(irq, irq_disable, "cli"); DEF_NATIVE(irq, irq_enable, "sti"); DEF_NATIVE(irq, restore_fl, "push %eax; popf"); DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); -#ifdef CONFIG_PARAVIRT_XXL DEF_NATIVE(cpu, iret, "iret"); #endif DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); @@ -43,11 +43,11 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) end = end_##ops##_##x; \ goto patch_site switch (type) { +#ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(irq, irq_disable); PATCH_SITE(irq, irq_enable); PATCH_SITE(irq, restore_fl); PATCH_SITE(irq, save_fl); -#ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(cpu, iret); #endif PATCH_SITE(mmu, read_cr2); diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 461aba038ada..b00937963a0f 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -3,10 +3,12 @@ #include #include +#ifdef CONFIG_PARAVIRT_XXL DEF_NATIVE(irq, irq_disable, "cli"); DEF_NATIVE(irq, irq_enable, "sti"); DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq"); DEF_NATIVE(irq, save_fl, "pushfq; popq %rax"); +#endif DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3"); @@ -51,11 +53,11 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) end = end_##ops##_##x; \ goto patch_site switch(type) { +#ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(irq, restore_fl); PATCH_SITE(irq, save_fl); PATCH_SITE(irq, irq_enable); PATCH_SITE(irq, irq_disable); -#ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(cpu, usergs_sysret64); PATCH_SITE(cpu, swapgs); PATCH_SITE(cpu, wbinvd); diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 789918d78697..1eae5af491c2 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -26,7 +26,7 @@ #define TOPOLOGY_REGISTER_OFFSET 0x10 -#if defined CONFIG_PCI && defined CONFIG_PARAVIRT +#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL /* * Interrupt control on vSMPowered systems: * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' -- cgit v1.2.3 From fdc0269e8958a1ec95b8ac685c1d372c24c60faa Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 28 Aug 2018 09:40:25 +0200 Subject: x86/paravirt: Move the Xen-only pv_mmu_ops under the PARAVIRT_XXL umbrella Most of the paravirt ops defined in pv_mmu_ops are for Xen PV guests only. Define them only if CONFIG_PARAVIRT_XXL is set. 
Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180828074026.820-15-jgross@suse.com --- arch/x86/kernel/asm-offsets.c | 4 +--- arch/x86/kernel/head_64.S | 4 +--- arch/x86/kernel/paravirt.c | 15 +++++++++------ arch/x86/kernel/paravirt_patch_32.c | 4 ++-- arch/x86/kernel/paravirt_patch_64.c | 4 +--- 5 files changed, 14 insertions(+), 17 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 28e7572ff74d..fc02c3cf238f 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -64,13 +64,11 @@ void common(void) { OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); #endif -#ifdef CONFIG_PARAVIRT - BLANK(); #ifdef CONFIG_PARAVIRT_XXL + BLANK(); OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable); OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable); OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret); -#endif OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2); #endif diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index a5bd72a0ee1a..827bca2c2782 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -25,14 +25,12 @@ #include #include -#ifdef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT_XXL #include #include #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg #else #define GET_CR2_INTO(reg) movq %cr2, reg -#endif -#ifndef CONFIG_PARAVIRT_XXL #define INTERRUPT_RETURN iretq #endif diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 5e8226335eac..bbf006fe78d7 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -384,11 +384,6 @@ struct paravirt_patch_template pv_ops = { #endif /* CONFIG_PARAVIRT_XXL */ /* Mmu ops. */ - .mmu.read_cr2 = native_read_cr2, - .mmu.write_cr2 = native_write_cr2, - .mmu.read_cr3 = __native_read_cr3, - .mmu.write_cr3 = native_write_cr3, - .mmu.flush_tlb_user = native_flush_tlb, .mmu.flush_tlb_kernel = native_flush_tlb_global, .mmu.flush_tlb_one_user = native_flush_tlb_one_user, @@ -396,6 +391,14 @@ struct paravirt_patch_template pv_ops = { .mmu.tlb_remove_table = (void (*)(struct mmu_gather *, void *))tlb_remove_page, + .mmu.exit_mmap = paravirt_nop, + +#ifdef CONFIG_PARAVIRT_XXL + .mmu.read_cr2 = native_read_cr2, + .mmu.write_cr2 = native_write_cr2, + .mmu.read_cr3 = __native_read_cr3, + .mmu.write_cr3 = native_write_cr3, + .mmu.pgd_alloc = __paravirt_pgd_alloc, .mmu.pgd_free = paravirt_nop, @@ -448,7 +451,6 @@ struct paravirt_patch_template pv_ops = { .mmu.make_pgd = PTE_IDENT, .mmu.dup_mmap = paravirt_nop, - .mmu.exit_mmap = paravirt_nop, .mmu.activate_mm = paravirt_nop, .mmu.lazy_mode = { @@ -458,6 +460,7 @@ struct paravirt_patch_template pv_ops = { }, .mmu.set_fixmap = native_set_fixmap, +#endif /* CONFIG_PARAVIRT_XXL */ #if defined(CONFIG_PARAVIRT_SPINLOCKS) /* Lock ops. 
*/ diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 1d44705c6528..d460cbcabcfe 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -7,10 +7,10 @@ DEF_NATIVE(irq, irq_enable, "sti"); DEF_NATIVE(irq, restore_fl, "push %eax; popf"); DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); DEF_NATIVE(cpu, iret, "iret"); -#endif DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); +#endif #if defined(CONFIG_PARAVIRT_SPINLOCKS) DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); @@ -49,10 +49,10 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) PATCH_SITE(irq, restore_fl); PATCH_SITE(irq, save_fl); PATCH_SITE(cpu, iret); -#endif PATCH_SITE(mmu, read_cr2); PATCH_SITE(mmu, read_cr3); PATCH_SITE(mmu, write_cr3); +#endif #if defined(CONFIG_PARAVIRT_SPINLOCKS) case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) { diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index b00937963a0f..5ad5bcda9dc6 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -8,11 +8,9 @@ DEF_NATIVE(irq, irq_disable, "cli"); DEF_NATIVE(irq, irq_enable, "sti"); DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq"); DEF_NATIVE(irq, save_fl, "pushfq; popq %rax"); -#endif DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3"); -#ifdef CONFIG_PARAVIRT_XXL DEF_NATIVE(cpu, wbinvd, "wbinvd"); DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); @@ -61,10 +59,10 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) PATCH_SITE(cpu, usergs_sysret64); PATCH_SITE(cpu, swapgs); PATCH_SITE(cpu, wbinvd); -#endif PATCH_SITE(mmu, read_cr2); PATCH_SITE(mmu, read_cr3); PATCH_SITE(mmu, write_cr3); +#endif #if defined(CONFIG_PARAVIRT_SPINLOCKS) case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) { -- cgit v1.2.3 From 3637897b6c9bc2f12f38956d64724a6d0bbb56fd Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 11 Sep 2018 11:15:10 +0200 Subject: x86/paravirt: Clean up native_patch() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_PARAVIRT_SPINLOCKS=n, it generates a warning: arch/x86/kernel/paravirt_patch_64.c: In function ‘native_patch’: arch/x86/kernel/paravirt_patch_64.c:89:1: warning: label ‘patch_site’ defined but not used [-Wunused-label] patch_site: ... but those labels can simply be removed by directly calling the respective functions there. Get rid of local variables too, while at it. Also, simplify function flow for better readability. 
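Condensed from the hunks below, the resulting native_patch() shape looks roughly like this; only one representative site is shown, the real function lists all of them:

/* Illustrative sketch of the cleaned-up flow: each known site returns
 * directly, everything else falls through to paravirt_patch_default(). */
unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
{
#define PATCH_SITE(ops, x)						\
	case PARAVIRT_PATCH(ops.x):					\
		return paravirt_patch_insns(ibuf, len,			\
					    start_##ops##_##x, end_##ops##_##x)

	switch (type) {
	PATCH_SITE(irq, save_fl);	/* one representative site */
	default:
		break;
	}
#undef PATCH_SITE

	return paravirt_patch_default(type, ibuf, addr, len);
}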
Signed-off-by: Borislav Petkov Reviewed-by: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: virtualization@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180911091510.GA12094@zn.tnic Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt_patch_32.c | 44 +++++++++++++---------------------- arch/x86/kernel/paravirt_patch_64.c | 46 ++++++++++++++----------------------- 2 files changed, 33 insertions(+), 57 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index d460cbcabcfe..6368c22fa1fa 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -34,14 +34,10 @@ extern bool pv_is_native_vcpu_is_preempted(void); unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { - const unsigned char *start, *end; - unsigned ret; - #define PATCH_SITE(ops, x) \ - case PARAVIRT_PATCH(ops.x): \ - start = start_##ops##_##x; \ - end = end_##ops##_##x; \ - goto patch_site + case PARAVIRT_PATCH(ops.x): \ + return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x) + switch (type) { #ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(irq, irq_disable); @@ -54,32 +50,24 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) PATCH_SITE(mmu, write_cr3); #endif #if defined(CONFIG_PARAVIRT_SPINLOCKS) - case PARAVIRT_PATCH(lock.queued_spin_unlock): - if (pv_is_native_spin_unlock()) { - start = start_lock_queued_spin_unlock; - end = end_lock_queued_spin_unlock; - goto patch_site; - } - goto patch_default; + case PARAVIRT_PATCH(lock.queued_spin_unlock): + if (pv_is_native_spin_unlock()) + return paravirt_patch_insns(ibuf, len, + start_lock_queued_spin_unlock, + end_lock_queued_spin_unlock); + break; - case PARAVIRT_PATCH(lock.vcpu_is_preempted): - if (pv_is_native_vcpu_is_preempted()) { - start = start_lock_vcpu_is_preempted; - end = end_lock_vcpu_is_preempted; - goto patch_site; - } - goto patch_default; + case PARAVIRT_PATCH(lock.vcpu_is_preempted): + if (pv_is_native_vcpu_is_preempted()) + return paravirt_patch_insns(ibuf, len, + start_lock_vcpu_is_preempted, + end_lock_vcpu_is_preempted); + break; #endif default: -patch_default: __maybe_unused - ret = paravirt_patch_default(type, ibuf, addr, len); - break; - -patch_site: - ret = paravirt_patch_insns(ibuf, len, start, end); break; } #undef PATCH_SITE - return ret; + return paravirt_patch_default(type, ibuf, addr, len); } diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 5ad5bcda9dc6..7ca9cb726f4d 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -42,15 +42,11 @@ extern bool pv_is_native_vcpu_is_preempted(void); unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { - const unsigned char *start, *end; - unsigned ret; - #define PATCH_SITE(ops, x) \ - case PARAVIRT_PATCH(ops.x): \ - start = start_##ops##_##x; \ - end = end_##ops##_##x; \ - goto patch_site - switch(type) { + case PARAVIRT_PATCH(ops.x): \ + return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x) + + switch (type) { #ifdef CONFIG_PARAVIRT_XXL PATCH_SITE(irq, restore_fl); PATCH_SITE(irq, save_fl); @@ -64,32 +60,24 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) PATCH_SITE(mmu, write_cr3); #endif #if defined(CONFIG_PARAVIRT_SPINLOCKS) - case PARAVIRT_PATCH(lock.queued_spin_unlock): - if (pv_is_native_spin_unlock()) { - start = 
start_lock_queued_spin_unlock; - end = end_lock_queued_spin_unlock; - goto patch_site; - } - goto patch_default; + case PARAVIRT_PATCH(lock.queued_spin_unlock): + if (pv_is_native_spin_unlock()) + return paravirt_patch_insns(ibuf, len, + start_lock_queued_spin_unlock, + end_lock_queued_spin_unlock); + break; - case PARAVIRT_PATCH(lock.vcpu_is_preempted): - if (pv_is_native_vcpu_is_preempted()) { - start = start_lock_vcpu_is_preempted; - end = end_lock_vcpu_is_preempted; - goto patch_site; - } - goto patch_default; + case PARAVIRT_PATCH(lock.vcpu_is_preempted): + if (pv_is_native_vcpu_is_preempted()) + return paravirt_patch_insns(ibuf, len, + start_lock_vcpu_is_preempted, + end_lock_vcpu_is_preempted); + break; #endif default: -patch_default: __maybe_unused - ret = paravirt_patch_default(type, ibuf, addr, len); - break; - -patch_site: - ret = paravirt_patch_insns(ibuf, len, start, end); break; } #undef PATCH_SITE - return ret; + return paravirt_patch_default(type, ibuf, addr, len); } -- cgit v1.2.3 From 3a025de64bf89c84a79909069e3c24ad9e710d27 Mon Sep 17 00:00:00 2001 From: Yi Sun Date: Mon, 8 Oct 2018 16:29:34 +0800 Subject: x86/hyperv: Enable PV qspinlock for Hyper-V Implement the required wait and kick callbacks to support PV spinlocks in Hyper-V guests. [ tglx: Document the requirement for disabling interrupts in the wait() callback. Remove goto and unnecessary includes. Add prototype for hv_vcpu_is_preempted(). Adapted to pending paravirt changes. ] Signed-off-by: Yi Sun Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: "K. Y. Srinivasan" Cc: Haiyang Zhang Cc: Stephen Hemminger Cc: Michael Kelley (EOSG) Cc: chao.p.peng@intel.com Cc: chao.gao@intel.com Cc: isaku.yamahata@intel.com Cc: tianyu.lan@microsoft.com Link: https://lkml.kernel.org/r/1538987374-51217-3-git-send-email-yi.y.sun@linux.intel.com --- arch/x86/kernel/cpu/mshyperv.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index ad12733f6058..1c72f3819eb1 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -199,6 +199,16 @@ static unsigned long hv_get_tsc_khz(void) return freq / 1000; } +#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV) +static void __init hv_smp_prepare_boot_cpu(void) +{ + native_smp_prepare_boot_cpu(); +#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS) + hv_init_spinlocks(); +#endif +} +#endif + static void __init ms_hyperv_init_platform(void) { int hv_host_info_eax; @@ -303,6 +313,10 @@ static void __init ms_hyperv_init_platform(void) if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) alloc_intr_gate(HYPERV_STIMER0_VECTOR, hv_stimer0_callback_vector); + +# ifdef CONFIG_SMP + smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu; +# endif #endif } -- cgit v1.2.3
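hv_init_spinlocks(), called from hv_smp_prepare_boot_cpu() above, lives outside arch/x86/kernel and is not part of this diff. Conceptually it mirrors kvm_spinlock_init() from earlier in this series; the sketch below is illustrative only, and the hv_qlock_wait()/hv_qlock_kick() names are placeholders rather than quotes of the Hyper-V code.

/* Illustrative sketch only -- modeled on kvm_spinlock_init() shown
 * earlier, not on the actual Hyper-V implementation. */
static void hv_qlock_wait(u8 *byte, u8 val)	/* placeholder name */
{
	/* Per the tglx note: must run with interrupts disabled.
	 * Re-check *byte and idle (e.g. via a hypercall) until kicked. */
}

static void hv_qlock_kick(int cpu)		/* placeholder name */
{
	/* Wake the waiting vCPU, e.g. by sending it an IPI. */
}

void __init hv_init_spinlocks(void)
{
	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = hv_qlock_wait;
	pv_ops.lock.kick = hv_qlock_kick;
}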