From 7879fc4bdc7506d37bd67b6fc29442c53c06dfda Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Sun, 25 Aug 2019 22:50:18 +0200
Subject: x86/rdrand: Sanity-check RDRAND output

It turned out recently that on certain AMD F15h and F16h machines, due
to the BIOS dropping the ball after resume, yet again, RDRAND would not
function anymore:

  c49a0a80137c ("x86/CPU/AMD: Clear RDRAND CPUID bit on AMD family 15h/16h")

Add a silly test to the CPU bringup path, to sanity-check the random
data RDRAND returns and scream as loudly as possible if that returned
random data doesn't change.

Suggested-by: Linus Torvalds
Signed-off-by: Borislav Petkov
Cc: Pu Wen
Cc: Thomas Gleixner
Cc: Tom Lendacky
Cc: x86-ml
Link: https://lkml.kernel.org/r/CAHk-=wjWPDauemCmLTKbdMYFB0UveMszZpcrwoUkJRRWKrqaTw@mail.gmail.com
---
 arch/x86/kernel/cpu/rdrand.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 5c900f9527ff..c4be62058dd9 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -29,7 +29,8 @@ __setup("nordrand", x86_rdrand_setup);
 #ifdef CONFIG_ARCH_RANDOM
 void x86_init_rdrand(struct cpuinfo_x86 *c)
 {
-        unsigned long tmp;
+        unsigned int changed = 0;
+        unsigned long tmp, prev;
         int i;
 
         if (!cpu_has(c, X86_FEATURE_RDRAND))
@@ -42,5 +43,24 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
                         return;
                 }
         }
+
+        /*
+         * Stupid sanity-check whether RDRAND does *actually* generate
+         * some at least random-looking data.
+         */
+        prev = tmp;
+        for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
+                if (rdrand_long(&tmp)) {
+                        if (prev != tmp)
+                                changed++;
+
+                        prev = tmp;
+                }
+        }
+
+        if (WARN_ON_ONCE(!changed))
+                pr_emerg(
+"RDRAND gives funky smelling output, might consider not using it by booting with \"nordrand\"");
+
 }
 #endif
--
cgit v1.2.3
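The same sanity check can be reproduced from user space with the RDRAND
intrinsic. The sketch below is an analogue, not the kernel code:
rdrand_long() and SANITY_CHECK_LOOPS are kernel-internal, and the loop
count of 8 here is an assumption.

  /* Build with: gcc -mrdrnd rdrand_check.c */
  #include <stdio.h>
  #include <immintrin.h>

  int main(void)
  {
      unsigned long long prev, tmp;
      unsigned int changed = 0;
      int i;

      if (!_rdrand64_step(&prev))    /* the intrinsic returns 0 on failure */
          return 1;

      for (i = 0; i < 8; i++) {      /* mirrors the kernel's check loop */
          if (_rdrand64_step(&tmp)) {
              if (prev != tmp)
                  changed++;
              prev = tmp;
          }
      }

      printf("RDRAND output %s\n",
             changed ? "changes as expected" : "never changed!");
      return !changed;
  }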
From e6b44ce1925a8329a937c57f0d60ba0d9bb5d226 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 1 Oct 2019 16:23:34 +0200
Subject: x86/math-emu: Check __copy_from_user() result
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The new __must_check annotation on __copy_from_user() successfully
identified some code that has lacked the check since at least
linux-2.1.73:

  arch/x86/math-emu/reg_ld_str.c:88:2: error: ignoring return value of \
  function declared with 'warn_unused_result' attribute [-Werror,-Wunused-result]
          __copy_from_user(sti_ptr, s, 10);
          ^~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~

  arch/x86/math-emu/reg_ld_str.c:1129:2: error: ignoring return value of \
  function declared with 'warn_unused_result' attribute [-Werror,-Wunused-result]
          __copy_from_user(register_base + offset, s, other);
          ^~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  arch/x86/math-emu/reg_ld_str.c:1131:3: error: ignoring return value of \
  function declared with 'warn_unused_result' attribute [-Werror,-Wunused-result]
                  __copy_from_user(register_base, s + other, offset);
                  ^~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In addition, the get_user()/put_user() helpers do not enforce a return
value check, but actually still require one. These have been missing for
even longer.

Change the internal wrappers around get_user()/put_user() to force a
signal and add a corresponding wrapper around __copy_from_user() to
check all such cases.

[ bp: Break long lines. ]

Fixes: 257e458057e5 ("Import 2.1.73")
Fixes: 9dd819a15162 ("uaccess: add missing __must_check attributes")
Signed-off-by: Arnd Bergmann
Signed-off-by: Borislav Petkov
Reviewed-by: Kees Cook
Cc: "H. Peter Anvin"
Cc: Bill Metzenthen
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: x86-ml
Link: https://lkml.kernel.org/r/20191001142344.1274185-1-arnd@arndb.de
---
 arch/x86/math-emu/fpu_system.h | 6 ++++--
 arch/x86/math-emu/reg_ld_str.c | 6 +++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index f98a0c956764..9b41391867dc 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -107,6 +107,8 @@ static inline bool seg_writable(struct desc_struct *d)
 #define FPU_access_ok(y,z)      if ( !access_ok(y,z) ) \
                                 math_abort(FPU_info,SIGSEGV)
 #define FPU_abort               math_abort(FPU_info, SIGSEGV)
+#define FPU_copy_from_user(to, from, n) \
+                do { if (copy_from_user(to, from, n)) FPU_abort; } while (0)
 
 #undef FPU_IGNORE_CODE_SEGV
 #ifdef FPU_IGNORE_CODE_SEGV
@@ -122,7 +124,7 @@ static inline bool seg_writable(struct desc_struct *d)
 #define FPU_code_access_ok(z) FPU_access_ok((void __user *)FPU_EIP,z)
 #endif
 
-#define FPU_get_user(x,y)       get_user((x),(y))
-#define FPU_put_user(x,y)       put_user((x),(y))
+#define FPU_get_user(x,y)       do { if (get_user((x),(y))) FPU_abort; } while (0)
+#define FPU_put_user(x,y)       do { if (put_user((x),(y))) FPU_abort; } while (0)
 
 #endif
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
index f3779743d15e..fe6246ff9887 100644
--- a/arch/x86/math-emu/reg_ld_str.c
+++ b/arch/x86/math-emu/reg_ld_str.c
@@ -85,7 +85,7 @@ int FPU_load_extended(long double __user *s, int stnr)
 
         RE_ENTRANT_CHECK_OFF;
         FPU_access_ok(s, 10);
-        __copy_from_user(sti_ptr, s, 10);
+        FPU_copy_from_user(sti_ptr, s, 10);
         RE_ENTRANT_CHECK_ON;
 
         return FPU_tagof(sti_ptr);
@@ -1126,9 +1126,9 @@ void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
         /* Copy all registers in stack order. */
         RE_ENTRANT_CHECK_OFF;
         FPU_access_ok(s, 80);
-        __copy_from_user(register_base + offset, s, other);
+        FPU_copy_from_user(register_base + offset, s, other);
         if (offset)
-                __copy_from_user(register_base, s + other, offset);
+                FPU_copy_from_user(register_base, s + other, offset);
         RE_ENTRANT_CHECK_ON;
 
         for (i = 0; i < 8; i++) {
--
cgit v1.2.3
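An aside on the wrapper shape used here: the do { ... } while (0) body is
what lets a macro containing an if expand as a single statement, so an
unbraced if/else around FPU_get_user() still parses correctly. A
self-contained user-space sketch of the same pattern (copy_checked() and
COPY_OR_DIE() are hypothetical stand-ins, not the kernel API):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Hypothetical stand-in for copy_from_user(): returns bytes NOT copied. */
  static size_t copy_checked(void *to, const void *from, size_t n)
  {
      memcpy(to, from, n);
      return 0;
  }

  /* Without do/while(0), the unbraced if/else below would mis-parse. */
  #define COPY_OR_DIE(to, from, n) \
      do { if (copy_checked(to, from, n)) abort(); } while (0)

  int main(void)
  {
      char src[8] = "abcdefg", dst[8];

      if (sizeof(dst) >= sizeof(src))
          COPY_OR_DIE(dst, src, sizeof(src));
      else
          puts("destination too small");

      printf("copied: %s\n", dst);
      return 0;
  }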
From 87d6021b814353d7b353afcc3698ffe49de7d4ec Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 1 Oct 2019 16:23:35 +0200
Subject: x86/math-emu: Limit MATH_EMULATION to 486SX compatibles
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The FPU emulation code is old and fragile in places, try to limit its
use to builds for CPUs that actually use it. As far as I can tell, this
is only true for i486sx compatibles, including the Cyrix 486SLC, AMD
Am486SX and ÉLAN SC410, UMC U5S and DM&P VortexSX86, all of which were
relatively short-lived and got replaced with i486DX compatible
processors soon after introduction, though some of the embedded
versions remained available much longer.

Signed-off-by: Arnd Bergmann
Signed-off-by: Borislav Petkov
Reviewed-by: Kees Cook
Cc: "H. Peter Anvin"
Cc: Andrew Morton
Cc: Bill Metzenthen
Cc: Ingo Molnar
Cc: Masahiro Yamada
Cc: Thomas Gleixner
Cc: x86-ml
Link: https://lkml.kernel.org/r/20191001142344.1274185-2-arnd@arndb.de
---
 arch/x86/Kconfig              |  2 +-
 arch/x86/Kconfig.cpu          | 25 ++++++++++++++++---------
 arch/x86/Makefile_32.cpu      |  1 +
 arch/x86/include/asm/module.h |  2 ++
 4 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d6e1faa28c58..91c22ee6bc9a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1751,7 +1751,7 @@ config X86_RESERVE_LOW
 config MATH_EMULATION
         bool
         depends on MODIFY_LDT_SYSCALL
-        prompt "Math emulation" if X86_32
+        prompt "Math emulation" if X86_32 && (M486SX || MELAN)
         ---help---
           Linux can emulate a math coprocessor (used for floating point
           operations) if you don't have one. 486DX and Pentium processors have
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 8e29c991ba3e..af9c967782f6 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -50,12 +50,19 @@ choice
           See each option's help text for additional details. If you don't
           know what to do, choose "486".
 
+config M486SX
+        bool "486SX"
+        depends on X86_32
+        ---help---
+          Select this for an 486-class CPU without an FPU such as
+          AMD/Cyrix/IBM/Intel SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5S.
+
 config M486
-        bool "486"
+        bool "486DX"
         depends on X86_32
         ---help---
           Select this for an 486-class CPU such as AMD/Cyrix/IBM/Intel
-          486DX/DX2/DX4 or SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
+          486DX/DX2/DX4 and UMC U5D.
 
 config M586
         bool "586/K5/5x86/6x86/6x86MX"
@@ -312,20 +319,20 @@ config X86_L1_CACHE_SHIFT
         int
         default "7" if MPENTIUM4 || MPSC
         default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-        default "4" if MELAN || M486 || MGEODEGX1
+        default "4" if MELAN || M486SX || M486 || MGEODEGX1
         default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
 config X86_F00F_BUG
         def_bool y
-        depends on M586MMX || M586TSC || M586 || M486
+        depends on M586MMX || M586TSC || M586 || M486SX || M486
 
 config X86_INVD_BUG
         def_bool y
-        depends on M486
+        depends on M486SX || M486
 
 config X86_ALIGNMENT_16
         def_bool y
-        depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+        depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
 
 config X86_INTEL_USERCOPY
         def_bool y
@@ -378,7 +385,7 @@ config X86_MINIMUM_CPU_FAMILY
 
 config X86_DEBUGCTLMSR
         def_bool y
-        depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486) && !UML
+        depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
 
 menuconfig PROCESSOR_SELECT
         bool "Supported processor vendors" if EXPERT
@@ -402,7 +409,7 @@ config CPU_SUP_INTEL
 config CPU_SUP_CYRIX_32
         default y
         bool "Support Cyrix processors" if PROCESSOR_SELECT
-        depends on M486 || M586 || M586TSC || M586MMX || (EXPERT && !64BIT)
+        depends on M486SX || M486 || M586 || M586TSC || M586MMX || (EXPERT && !64BIT)
         ---help---
           This enables detection, tunings and quirks for Cyrix processors
 
@@ -470,7 +477,7 @@ config CPU_SUP_TRANSMETA_32
 config CPU_SUP_UMC_32
         default y
         bool "Support UMC processors" if PROCESSOR_SELECT
-        depends on M486 || (EXPERT && !64BIT)
+        depends on M486SX || M486 || (EXPERT && !64BIT)
         ---help---
           This enables detection, tunings and quirks for UMC processors
 
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 1f5faf8606b4..cd3056759880 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -10,6 +10,7 @@ else
 tune            = $(call cc-option,-mcpu=$(1),$(2))
 endif
 
+cflags-$(CONFIG_M486SX)         += -march=i486
 cflags-$(CONFIG_M486)           += -march=i486
 cflags-$(CONFIG_M586)           += -march=i586
 cflags-$(CONFIG_M586TSC)        += -march=i586
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 7948a17febb4..c215d2762488 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -15,6 +15,8 @@ struct mod_arch_specific {
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
+#elif defined CONFIG_M486SX
+#define MODULE_PROC_FAMILY "486SX "
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
--
cgit v1.2.3

From 9d40b85bb46a99bc95dad3a07787da93b0a018e9 Mon Sep 17 00:00:00 2001
From: Babu Moger
Date: Mon, 7 Oct 2019 15:48:39 -0500
Subject: x86/cpufeatures: Add feature bit RDPRU on AMD
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

AMD Zen 2 introduces a new RDPRU instruction which is used to give
access to some processor registers that are typically only accessible
when the privilege level is zero. ECX is used as the implicit register
to specify which register to read. RDPRU places the specified
register’s value into EDX:EAX.

For example, the RDPRU instruction can be used to read MPERF and APERF
at CPL > 0.

Add the feature bit so it is visible in /proc/cpuinfo.

Details are available in the AMD64 Architecture Programmer’s Manual:
https://www.amd.com/system/files/TechDocs/24594.pdf

Signed-off-by: Babu Moger
Signed-off-by: Borislav Petkov
Cc: Aaron Lewis
Cc: ak@linux.intel.com
Cc: Fenghua Yu
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Josh Poimboeuf
Cc: "Peter Zijlstra (Intel)"
Cc: robert.hu@linux.intel.com
Cc: Thomas Gleixner
Cc: Thomas Hellstrom
Cc: x86-ml
Link: https://lkml.kernel.org/r/20191007204839.5727.10803.stgit@localhost.localdomain
---
 arch/x86/include/asm/cpufeatures.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0652d3eed9bd..1db10a112537 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -292,6 +292,7 @@
 #define X86_FEATURE_CLZERO      (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF      (13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR  (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_RDPRU       (13*32+ 4) /* Read processor register at user level */
 #define X86_FEATURE_WBNOINVD    (13*32+ 9) /* WBNOINVD instruction */
 #define X86_FEATURE_AMD_IBPB    (13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS    (13*32+14) /* "" Indirect Branch Restricted Speculation */
--
cgit v1.2.3
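To illustrate the instruction described above, a user-space read of MPERF
via RDPRU could look like the sketch below. Treat it as an
assumption-laden sketch, not kernel code: the 0xf3 0x0f 0x01 0xfd byte
encoding and the ECX indices (0 = MPERF, 1 = APERF) follow the APM, and
the instruction faults on anything without the feature, so gate it on the
CPUID bit this patch exposes before running it on real hardware.

  #include <stdio.h>

  /* RDPRU: ECX selects the register, the result lands in EDX:EAX.
   * Encoded as raw bytes since older assemblers lack the mnemonic.
   */
  static unsigned long long rdpru(unsigned int reg)
  {
      unsigned int lo, hi;

      asm volatile(".byte 0xf3, 0x0f, 0x01, 0xfd"
                   : "=a" (lo), "=d" (hi)
                   : "c" (reg));
      return ((unsigned long long)hi << 32) | lo;
  }

  int main(void)
  {
      /* 0 = MPERF, 1 = APERF per the APM; #UD if RDPRU is absent */
      printf("MPERF: %llu\n", rdpru(0));
      printf("APERF: %llu\n", rdpru(1));
      return 0;
  }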
From 1edae1ae62589f28d00da186465a003e2a7f9c6c Mon Sep 17 00:00:00 2001
From: Scott Wood
Date: Sat, 12 Oct 2019 02:00:54 -0500
Subject: x86/Kconfig: Enforce limit of 512 CPUs with MAXSMP and no CPUMASK_OFFSTACK

The help text of NR_CPUS says that the maximum number of CPUs supported
without CPUMASK_OFFSTACK is 512. However, NR_CPUS_RANGE_END allows this
limit to be bypassed by MAXSMP even if CPUMASK_OFFSTACK is not set.

This scenario can currently only happen in the RT tree, since it has
"select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL" in MAXSMP.

However, even if we ignore the RT tree, checking for MAXSMP in addition
to CPUMASK_OFFSTACK is redundant.

Signed-off-by: Scott Wood
Signed-off-by: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Mike Travis
Cc: Thomas Gleixner
Cc: x86-ml
Link: https://lkml.kernel.org/r/20191012070054.28657-1-swood@redhat.com
---
 arch/x86/Kconfig | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 91c22ee6bc9a..896f840ade2d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1000,8 +1000,8 @@ config NR_CPUS_RANGE_END
 config NR_CPUS_RANGE_END
         int
         depends on X86_64
-        default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK)
-        default  512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK)
+        default 8192 if SMP &&  CPUMASK_OFFSTACK
+        default  512 if SMP && !CPUMASK_OFFSTACK
         default    1 if !SMP
 
 config NR_CPUS_DEFAULT
--
cgit v1.2.3

From 58db103784994e9be5322237df7ef0cf4c0afc39 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Fri, 1 Nov 2019 15:38:50 +0300
Subject: x86/fpu: Update stale variable name in comment

When the FPU code was reworked, pcntxt_mask was renamed to
xfeatures_mask. Reflect it in the comment as well.

Signed-off-by: Cyrill Gorcunov
Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20191101123850.GE1615@uranus.lan
---
 arch/x86/kernel/fpu/xstate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index e5cb67d67c03..18ffc6f4c447 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -840,7 +840,7 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
 
         /*
          * We should not ever be requesting features that we
-         * have not enabled. Remember that pcntxt_mask is
+         * have not enabled. Remember that xfeatures_mask is
          * what we write to the XCR0 register.
          */
         WARN_ONCE(!(xfeatures_mask & BIT_ULL(xfeature_nr)),
--
cgit v1.2.3

From c08550510ca26bd57eabfe912281635e382193e5 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Fri, 1 Nov 2019 15:42:28 +0300
Subject: x86/fpu: Shrink space allocated for xstate_comp_offsets

commit 8ff925e10f2c ("x86/xsaves: Clean up code in xstate offsets
computation in xsave area") introduced an allocation of 64 entries for
xstate_comp_offsets while the code only handles up to XFEATURE_MAX
entries. For this reason, xstate_offsets and xstate_sizes are already
defined with the explicit XFEATURE_MAX limit. Do the same for the
compacted format for consistency's sake.

As the changelog of that commit is not giving any information, it's
assumed that the main idea was to cover all possible bits in
xfeatures_mask, but this doesn't explain why other variables such as
the non-compacted offsets and sizes are explicitly limited to
XFEATURE_MAX.

For consistency it's better to use the XFEATURE_MAX limit everywhere
and extend it on demand when new features get implemented at the
hardware level and subsequently supported by the kernel.

Signed-off-by: Cyrill Gorcunov
Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20191101124228.GF1615@uranus.lan
---
 arch/x86/kernel/fpu/xstate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 18ffc6f4c447..a8bd5c01c796 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -60,7 +60,7 @@ u64 xfeatures_mask __read_mostly;
 
 static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
 static unsigned int xstate_sizes[XFEATURE_MAX]   = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
+static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
 
 /*
  * The XSAVE area of kernel can be in standard or compacted format;
@@ -342,7 +342,7 @@ static int xfeature_is_aligned(int xfeature_nr)
  */
 static void __init setup_xstate_comp(void)
 {
-        unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
+        unsigned int xstate_comp_sizes[XFEATURE_MAX];
         int i;
 
         /*
--
cgit v1.2.3
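The { [ 0 ... XFEATURE_MAX - 1] = -1} initializers in the hunk above use
the GNU C range-designator extension. A minimal standalone illustration
(the XFEATURE_MAX value and array name are stand-ins for the sketch):

  #include <stdio.h>

  #define XFEATURE_MAX 10    /* stand-in value for the sketch */

  /* GNU C extension: every element from 0 to XFEATURE_MAX-1 gets -1,
   * marking "offset not yet computed", just like the kernel arrays.
   */
  static unsigned int offsets[XFEATURE_MAX] = { [0 ... XFEATURE_MAX - 1] = -1 };

  int main(void)
  {
      printf("offsets[%d] = %u (== (unsigned int)-1)\n",
             XFEATURE_MAX - 1, offsets[XFEATURE_MAX - 1]);
      return 0;
  }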
From 446e693ca30b7c7c2aaeaf09e90ec224c7538fec Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Fri, 1 Nov 2019 16:01:53 +0300
Subject: x86/fpu: Use XFEATURE_FP/SSE enum values instead of hardcoded numbers

When setting up sizes and offsets for legacy header entries, the code
uses hardcoded 0/1 instead of the corresponding enum values XFEATURE_FP
and XFEATURE_SSE.

Replace the hardcoded numbers, which enhances the readability of the
code and also makes this code the first user of those enum values.

Signed-off-by: Cyrill Gorcunov
Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20191101130153.GG1615@uranus.lan
---
 arch/x86/kernel/fpu/xstate.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index a8bd5c01c796..319be936c348 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -254,10 +254,13 @@ static void __init setup_xstate_features(void)
          * in the fixed offsets in the xsave area in either compacted form
          * or standard form.
          */
-        xstate_offsets[0] = 0;
-        xstate_sizes[0] = offsetof(struct fxregs_state, xmm_space);
-        xstate_offsets[1] = xstate_sizes[0];
-        xstate_sizes[1] = FIELD_SIZEOF(struct fxregs_state, xmm_space);
+        xstate_offsets[XFEATURE_FP]     = 0;
+        xstate_sizes[XFEATURE_FP]       = offsetof(struct fxregs_state,
+                                                   xmm_space);
+
+        xstate_offsets[XFEATURE_SSE]    = xstate_sizes[XFEATURE_FP];
+        xstate_sizes[XFEATURE_SSE]      = FIELD_SIZEOF(struct fxregs_state,
+                                                       xmm_space);
 
         for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                 if (!xfeature_enabled(i))
@@ -350,8 +353,9 @@ static void __init setup_xstate_comp(void)
          * in the fixed offsets in the xsave area in either compacted form
          * or standard form.
          */
-        xstate_comp_offsets[0] = 0;
-        xstate_comp_offsets[1] = offsetof(struct fxregs_state, xmm_space);
+        xstate_comp_offsets[XFEATURE_FP] = 0;
+        xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
+                                                     xmm_space);
 
         if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
                 for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
--
cgit v1.2.3

From b971880fe79f4042aaaf426744a5b19521bf77b3 Mon Sep 17 00:00:00 2001
From: Babu Moger
Date: Tue, 5 Nov 2019 21:25:32 +0000
Subject: x86/Kconfig: Rename UMIP config parameter

AMD 2nd generation EPYC processors support the UMIP (User-Mode
Instruction Prevention) feature. So, rename X86_INTEL_UMIP to generic
X86_UMIP and modify the text to cover both Intel and AMD.

[ bp: Take care of the disabled-features.h copy in tools/ too. ]

Signed-off-by: Babu Moger
Signed-off-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Ricardo Neri
Cc: Thomas Gleixner
Cc: "x86@kernel.org"
Link: https://lkml.kernel.org/r/157298912544.17462.2018334793891409521.stgit@naples-babu.amd.com
---
 arch/x86/Kconfig                               | 16 ++++++++--------
 arch/x86/include/asm/disabled-features.h       |  2 +-
 arch/x86/include/asm/umip.h                    |  4 ++--
 arch/x86/kernel/Makefile                       |  2 +-
 tools/arch/x86/include/asm/disabled-features.h |  2 +-
 5 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 896f840ade2d..434fae95279c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1880,16 +1880,16 @@ config X86_SMAP
 
           If unsure, say Y.
 
-config X86_INTEL_UMIP
+config X86_UMIP
         def_bool y
-        depends on CPU_SUP_INTEL
-        prompt "Intel User Mode Instruction Prevention" if EXPERT
+        depends on CPU_SUP_INTEL || CPU_SUP_AMD
+        prompt "User Mode Instruction Prevention" if EXPERT
         ---help---
-          The User Mode Instruction Prevention (UMIP) is a security
-          feature in newer Intel processors.  If enabled, a general
-          protection fault is issued if the SGDT, SLDT, SIDT, SMSW
-          or STR instructions are executed in user mode. These instructions
-          unnecessarily expose information about the hardware state.
+          User Mode Instruction Prevention (UMIP) is a security feature in
+          some x86 processors. If enabled, a general protection fault is
+          issued if the SGDT, SLDT, SIDT, SMSW or STR instructions are
+          executed in user mode. These instructions unnecessarily expose
+          information about the hardware state.
 
           The vast majority of applications do not use these instructions.
           For the very few that do, software emulation is provided in
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8e1d0bb46361 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -22,7 +22,7 @@
 # define DISABLE_SMAP   (1<<(X86_FEATURE_SMAP & 31))
 #endif
 
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
 # define DISABLE_UMIP   0
 #else
 # define DISABLE_UMIP   (1<<(X86_FEATURE_UMIP & 31))
diff --git a/arch/x86/include/asm/umip.h b/arch/x86/include/asm/umip.h
index db43f2a0d92c..aeed98c3c9e1 100644
--- a/arch/x86/include/asm/umip.h
+++ b/arch/x86/include/asm/umip.h
@@ -4,9 +4,9 @@
 #include <linux/types.h>
 #include <asm/ptrace.h>
 
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
 bool fixup_umip_exception(struct pt_regs *regs);
 #else
 static inline bool fixup_umip_exception(struct pt_regs *regs) { return false; }
-#endif  /* CONFIG_X86_INTEL_UMIP */
+#endif  /* CONFIG_X86_UMIP */
 #endif /* _ASM_X86_UMIP_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3578ad248bc9..52ce1e239525 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -134,7 +134,7 @@ obj-$(CONFIG_EFI)                       += sysfb_efi.o
 obj-$(CONFIG_PERF_EVENTS)               += perf_regs.o
 obj-$(CONFIG_TRACING)                   += tracepoint.o
 obj-$(CONFIG_SCHED_MC_PRIO)             += itmt.o
-obj-$(CONFIG_X86_INTEL_UMIP)            += umip.o
+obj-$(CONFIG_X86_UMIP)                  += umip.o
 
 obj-$(CONFIG_UNWINDER_ORC)              += unwind_orc.o
 obj-$(CONFIG_UNWINDER_FRAME_POINTER)    += unwind_frame.o
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8e1d0bb46361 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -22,7 +22,7 @@
 # define DISABLE_SMAP   (1<<(X86_FEATURE_SMAP & 31))
 #endif
 
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
 # define DISABLE_UMIP   0
 #else
 # define DISABLE_UMIP   (1<<(X86_FEATURE_UMIP & 31))
--
cgit v1.2.3
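What UMIP protects against can be demonstrated from user space: absent
UMIP, the snippet below reads the GDT descriptor at CPL 3. A hedged
sketch, assuming an x86-64 build; with UMIP enforced, the SGDT below
raises a general protection fault, and depending on kernel version and
mode the kernel either emulates the result with dummy values or
delivers SIGSEGV.

  #include <stdio.h>
  #include <stdint.h>

  struct __attribute__((packed)) desc_ptr {
      uint16_t size;
      uint64_t address;    /* 32-bit base on i386 builds */
  };

  int main(void)
  {
      struct desc_ptr gdtr = { 0, 0 };

      /* Unprivileged without UMIP; traps (and may be emulated) with it. */
      asm volatile("sgdt %0" : "=m" (gdtr));
      printf("GDT base: %#lx, limit: %#x\n",
             (unsigned long)gdtr.address, gdtr.size);
      return 0;
  }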
From 9774a96f785bf0fa6d956ce33300463f1704dbeb Mon Sep 17 00:00:00 2001
From: Babu Moger
Date: Tue, 5 Nov 2019 21:25:40 +0000
Subject: x86/umip: Make the comments vendor-agnostic

AMD 2nd generation EPYC processors also support the UMIP feature. Make
the comments vendor-agnostic.

Signed-off-by: Babu Moger
Signed-off-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Ricardo Neri
Cc: Thomas Gleixner
Cc: "x86@kernel.org"
Link: https://lkml.kernel.org/r/157298913784.17462.12654728938970637305.stgit@naples-babu.amd.com
---
 arch/x86/kernel/umip.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 548fefed71ee..8ccef6c495dc 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -1,6 +1,6 @@
 /*
- * umip.c Emulation for instruction protected by the Intel User-Mode
- * Instruction Prevention feature
+ * umip.c Emulation for instruction protected by the User-Mode Instruction
+ * Prevention feature
  *
  * Copyright (c) 2017, Intel Corporation.
  * Ricardo Neri
@@ -18,10 +18,10 @@
 
 /** DOC: Emulation for User-Mode Instruction Prevention (UMIP)
  *
- * The feature User-Mode Instruction Prevention present in recent Intel
- * processor prevents a group of instructions (SGDT, SIDT, SLDT, SMSW and STR)
- * from being executed with CPL > 0. Otherwise, a general protection fault is
- * issued.
+ * User-Mode Instruction Prevention is a security feature present in recent
+ * x86 processors that, when enabled, prevents a group of instructions (SGDT,
+ * SIDT, SLDT, SMSW and STR) from being run in user mode by issuing a general
+ * protection fault if the instruction is executed with CPL > 0.
  *
 * Rather than relaying to the user space the general protection fault caused by
 * the UMIP-protected instructions (in the form of a SIGSEGV signal), it can be
--
cgit v1.2.3

From f6a892ddd53e555362dbf64d31b47fde0f550ec4 Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Mon, 16 Sep 2019 15:39:56 -0700
Subject: x86/cpu: Align cpu_caps_cleared and cpu_caps_set to unsigned long

cpu_caps_cleared[] and cpu_caps_set[] are arrays of type u32 and
therefore naturally aligned to 4 bytes, which is also unsigned long
aligned on 32-bit, but not on 64-bit.

The array pointer is handed into atomic bit operations. If the access
is not aligned to unsigned long then the atomic bit operations can end
up crossing a cache line boundary, which causes the CPU to do a full
bus lock as it can't lock both cache lines at once. The bus lock
operation is heavy weight and can cause severe performance degradation.

The upcoming #AC split lock detection mechanism will issue warnings for
this kind of access.

Force the alignment of these arrays to unsigned long. This avoids the
massive code changes which would be required when converting the array
data type to unsigned long.

[ tglx: Rewrote changelog ]

Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
Signed-off-by: Thomas Gleixner
Reviewed-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20190916223958.27048-2-tony.luck@intel.com
---
 arch/x86/kernel/cpu/common.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9ae7d1bcd4f4..1e9430bed75b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -565,8 +565,9 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
         return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
-__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
+/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
 
 void load_percpu_segment(int cpu)
 {
--
cgit v1.2.3
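A minimal sketch of the pattern this patch and the next one rely on:
forcing unsigned long alignment onto a u32 array so it can be fed to
long-granular bitops. The set_bit_long() helper here is a plain C
stand-in for the kernel's atomic set_bit(), and NCAPS is a stand-in
value.

  #include <stdio.h>
  #include <stdint.h>

  #define NCAPS 20    /* stand-in for NCAPINTS + NBUGINTS */

  /* Without the attribute, a u32 array is only guaranteed 4-byte
   * alignment, so a long-sized read-modify-write on top of it may
   * straddle a cache line and trigger a bus lock / #AC split lock.
   */
  static uint32_t caps[NCAPS] __attribute__((aligned(sizeof(unsigned long))));

  static void set_bit_long(unsigned int nr, unsigned long *addr)
  {
      addr[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
  }

  int main(void)
  {
      set_bit_long(33, (unsigned long *)caps);
      /* On a 64-bit little-endian build, bit 33 lands in caps[1] */
      printf("caps[1] = %#x\n", caps[1]);
      return 0;
  }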
From db8c33f8b5bea59d00ca12dcd6b65d01b1ea98ef Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Mon, 16 Sep 2019 15:39:58 -0700
Subject: x86/cpu: Align the x86_capability array to size of unsigned long

The x86_capability array in cpuinfo_x86 is of type u32 and thus is
naturally aligned to 4 bytes. But, set_bit() and clear_bit() require
the array to be aligned to the size of unsigned long (i.e. 8 bytes on
64-bit systems).

The array pointer is handed into atomic bit operations. If the access
is not aligned to unsigned long then the atomic bit operations can end
up crossing a cache line boundary, which causes the CPU to do a full
bus lock as it can't lock both cache lines at once. The bus lock
operation is heavy weight and can cause severe performance degradation.

The upcoming #AC split lock detection mechanism will issue warnings for
this kind of access.

Force the alignment of the array to unsigned long. This avoids the
massive code changes which would be required when converting the array
data type to unsigned long.

[ tglx: Rewrote changelog so it contains information WHY this is required ]

Suggested-by: David Laight
Suggested-by: Thomas Gleixner
Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20190916223958.27048-4-tony.luck@intel.com
---
 arch/x86/include/asm/processor.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6e0a3b43d027..c073534ca485 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -93,7 +93,15 @@ struct cpuinfo_x86 {
         __u32                   extended_cpuid_level;
         /* Maximum supported CPUID level, -1=no CPUID: */
         int                     cpuid_level;
-        __u32                   x86_capability[NCAPINTS + NBUGINTS];
+        /*
+         * Align to size of unsigned long because the x86_capability array
+         * is passed to bitops which require the alignment. Use unnamed
+         * union to enforce the array is aligned to size of unsigned long.
+         */
+        union {
+                __u32           x86_capability[NCAPINTS + NBUGINTS];
+                unsigned long   x86_capability_alignment;
+        };
         char                    x86_vendor_id[16];
         char                    x86_model_id[64];
         /* in KB - valid for CPUS which support this call: */
--
cgit v1.2.3
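The unnamed-union trick in this last hunk can be verified with a static
assertion. A small sketch; the struct and field names are illustrative,
not the kernel's:

  #include <stdio.h>
  #include <stdalign.h>
  #include <stdint.h>

  #define NCAPS 20    /* stand-in for NCAPINTS + NBUGINTS */

  struct caps {
      int cpuid_level;
      /* The unsigned long member drags the whole union, and therefore
       * the u32 array sharing its storage, up to long alignment.
       */
      union {
          uint32_t capability[NCAPS];
          unsigned long capability_alignment;
      };
  };

  _Static_assert(alignof(struct caps) >= alignof(unsigned long),
                 "capability array must be long-aligned");

  int main(void)
  {
      printf("alignof(struct caps) = %zu\n", alignof(struct caps));
      return 0;
  }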