author    Linus Torvalds <torvalds@linux-foundation.org>    2018-04-04 16:01:43 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-04-04 16:01:43 -0700
commit    23221d997b3d28cb80c4d4d1b4bd36610f8e12fc (patch)
tree      2abbdfd8f44c4d61440f780bad2f584ef301e6a3 /arch
parent    5b1f3dc927a2681cb339b05156f828f83bfa1b80 (diff)
parent    65896545b69ffaac947c12e11d3dcc57fd1fb772 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Nothing particularly stands out here, probably because people were
  tied up with spectre/meltdown stuff last time around. Still, the main
  pieces are:

   - Rework of our CPU features framework so that we can whitelist CPUs
     that don't require kpti even in a heterogeneous system

   - Support for the IDC/DIC architecture extensions, which allow us to
     elide instruction and data cache maintenance when writing out
     instructions

   - Removal of the large memory model which resulted in suboptimal
     codegen by the compiler and increased the use of literal pools,
     which could potentially be used as ROP gadgets since they are
     mapped as executable

   - Rework of forced signal delivery so that the siginfo_t is
     well-formed and handling of show_unhandled_signals is consolidated
     and made consistent between different fault types

   - More siginfo cleanup based on the initial patches from Eric
     Biederman

   - Workaround for Cortex-A55 erratum #1024718

   - Some small ACPI IORT updates and cleanups from Lorenzo Pieralisi

   - Misc cleanups and non-critical fixes"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
  arm64: uaccess: Fix omissions from usercopy whitelist
  arm64: fpsimd: Split cpu field out from struct fpsimd_state
  arm64: tlbflush: avoid writing RES0 bits
  arm64: cmpxchg: Include linux/compiler.h in asm/cmpxchg.h
  arm64: move percpu cmpxchg implementation from cmpxchg.h to percpu.h
  arm64: cmpxchg: Include build_bug.h instead of bug.h for BUILD_BUG
  arm64: lse: Include compiler_types.h and export.h for out-of-line LL/SC
  arm64: fpsimd: include <linux/init.h> in fpsimd.h
  drivers/perf: arm_pmu_platform: do not warn about affinity on uniprocessor
  perf: arm_spe: include linux/vmalloc.h for vmap()
  Revert "arm64: Revert L1_CACHE_SHIFT back to 6 (64-byte cache line size)"
  arm64: cpufeature: Avoid warnings due to unused symbols
  arm64: Add work around for Arm Cortex-A55 Erratum 1024718
  arm64: Delay enabling hardware DBM feature
  arm64: Add MIDR encoding for Arm Cortex-A55 and Cortex-A35
  arm64: capabilities: Handle shared entries
  arm64: capabilities: Add support for checks based on a list of MIDRs
  arm64: Add helpers for checking CPU MIDR against a range
  arm64: capabilities: Clean up midr range helpers
  arm64: capabilities: Change scope of VHE to Boot CPU feature
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/Kconfig                        47
-rw-r--r--  arch/arm64/Makefile                       15
-rw-r--r--  arch/arm64/include/asm/assembler.h        34
-rw-r--r--  arch/arm64/include/asm/cache.h             4
-rw-r--r--  arch/arm64/include/asm/cacheflush.h        3
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h          29
-rw-r--r--  arch/arm64/include/asm/cpucaps.h           6
-rw-r--r--  arch/arm64/include/asm/cpufeature.h      262
-rw-r--r--  arch/arm64/include/asm/cputype.h          43
-rw-r--r--  arch/arm64/include/asm/esr.h               9
-rw-r--r--  arch/arm64/include/asm/fpsimd.h           34
-rw-r--r--  arch/arm64/include/asm/lse.h               3
-rw-r--r--  arch/arm64/include/asm/module.h            2
-rw-r--r--  arch/arm64/include/asm/percpu.h           29
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h     1
-rw-r--r--  arch/arm64/include/asm/processor.h        47
-rw-r--r--  arch/arm64/include/asm/sysreg.h            3
-rw-r--r--  arch/arm64/include/asm/system_misc.h      11
-rw-r--r--  arch/arm64/include/asm/tlbflush.h         25
-rw-r--r--  arch/arm64/include/asm/traps.h             8
-rw-r--r--  arch/arm64/include/asm/virt.h              6
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h        4
-rw-r--r--  arch/arm64/include/uapi/asm/siginfo.h     21
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c       2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c           316
-rw-r--r--  arch/arm64/kernel/cpufeature.c           438
-rw-r--r--  arch/arm64/kernel/cpuinfo.c                4
-rw-r--r--  arch/arm64/kernel/debug-monitors.c         3
-rw-r--r--  arch/arm64/kernel/fpsimd.c               105
-rw-r--r--  arch/arm64/kernel/kaslr.c                 35
-rw-r--r--  arch/arm64/kernel/kgdb.c                  21
-rw-r--r--  arch/arm64/kernel/module-plts.c           90
-rw-r--r--  arch/arm64/kernel/module.c                44
-rw-r--r--  arch/arm64/kernel/process.c                6
-rw-r--r--  arch/arm64/kernel/ptrace.c                32
-rw-r--r--  arch/arm64/kernel/reloc_test_core.c        4
-rw-r--r--  arch/arm64/kernel/reloc_test_syms.S       12
-rw-r--r--  arch/arm64/kernel/signal.c                 9
-rw-r--r--  arch/arm64/kernel/signal32.c              15
-rw-r--r--  arch/arm64/kernel/smp.c                   44
-rw-r--r--  arch/arm64/kernel/sys_compat.c            23
-rw-r--r--  arch/arm64/kernel/traps.c                 76
-rw-r--r--  arch/arm64/lib/Makefile                    3
-rw-r--r--  arch/arm64/mm/cache.S                     21
-rw-r--r--  arch/arm64/mm/fault.c                    236
-rw-r--r--  arch/arm64/mm/proc.S                      22
-rw-r--r--  arch/x86/kernel/signal_compat.c            2
47 files changed, 1434 insertions(+), 775 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 302d0b681676..177be0d1d090 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -459,12 +459,26 @@ config ARM64_ERRATUM_845719
config ARM64_ERRATUM_843419
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
default y
- select ARM64_MODULE_CMODEL_LARGE if MODULES
+ select ARM64_MODULE_PLTS if MODULES
help
This option links the kernel with '--fix-cortex-a53-843419' and
- builds modules using the large memory model in order to avoid the use
- of the ADRP instruction, which can cause a subsequent memory access
- to use an incorrect address on Cortex-A53 parts up to r0p4.
+ enables PLT support to replace certain ADRP instructions, which can
+ cause subsequent memory accesses to use an incorrect address on
+ Cortex-A53 parts up to r0p4.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_1024718
+ bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
+ default y
+ help
+	  This option adds a workaround for Arm Cortex-A55 Erratum 1024718.
+
+ Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
+ update of the hardware dirty bit when the DBM/AP bits are updated
+	  without a break-before-make. The workaround is to disable the usage
+	  of hardware DBM locally on the affected cores. CPUs not affected by
+	  the erratum will continue to use the feature.
If unsure, say Y.
@@ -1108,12 +1122,25 @@ config ARM64_SVE
To enable use of this extension on CPUs that implement it, say Y.
-config ARM64_MODULE_CMODEL_LARGE
- bool
+ Note that for architectural reasons, firmware _must_ implement SVE
+ support when running on SVE capable hardware. The required support
+ is present in:
+
+ * version 1.5 and later of the ARM Trusted Firmware
+ * the AArch64 boot wrapper since commit 5e1261e08abf
+ ("bootwrapper: SVE: Enable SVE for EL2 and below").
+
+ For other firmware implementations, consult the firmware documentation
+ or vendor.
+
+ If you need the kernel to boot on SVE-capable hardware with broken
+ firmware, you may need to say N here until you get your firmware
+ fixed. Otherwise, you may experience firmware panics or lockups when
+ booting the kernel. If unsure and you are not observing these
+ symptoms, you should assume that it is safe to say Y.
config ARM64_MODULE_PLTS
bool
- select ARM64_MODULE_CMODEL_LARGE
select HAVE_MOD_ARCH_SPECIFIC
config RELOCATABLE
@@ -1147,12 +1174,12 @@ config RANDOMIZE_BASE
If unsure, say N.
config RANDOMIZE_MODULE_REGION_FULL
- bool "Randomize the module region independently from the core kernel"
+ bool "Randomize the module region over a 4 GB range"
depends on RANDOMIZE_BASE
default y
help
- Randomizes the location of the module region without considering the
- location of the core kernel. This way, it is impossible for modules
+ Randomizes the location of the module region inside a 4 GB window
+ covering the core kernel. This way, it is less likely for modules
to leak information about the location of core kernel data structures
but it does imply that function calls between modules and the core
kernel will need to be resolved via veneers in the module PLT.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index b481b4a7c011..15402861bb59 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -51,7 +51,6 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
@@ -77,10 +76,6 @@ endif
CHECKFLAGS += -D__aarch64__ -m64
-ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
-KBUILD_CFLAGS_MODULE += -mcmodel=large
-endif
-
ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
endif
@@ -97,12 +92,14 @@ else
TEXT_OFFSET := 0x00080000
endif
-# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
+# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
# in 32-bit arithmetic
+KASAN_SHADOW_SCALE_SHIFT := 3
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
- (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
- + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
- - (1 << (64 - 32 - 3)) )) )
+ (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+ + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
+ - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
export TEXT_OFFSET GZFLAGS
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3c78835bba94..053d83e8db6f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -202,25 +202,15 @@ lr .req x30 // link register
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC when running
- * in core kernel context. In module context, a movz/movk sequence
- * is used, since modules may be loaded far away from the kernel
- * when KASLR is in effect.
+ * <symbol> is within the range +/- 4 GB of the PC.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
*/
.macro adr_l, dst, sym
-#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
-#else
- movz \dst, #:abs_g3:\sym
- movk \dst, #:abs_g2_nc:\sym
- movk \dst, #:abs_g1_nc:\sym
- movk \dst, #:abs_g0_nc:\sym
-#endif
.endm
/*
@@ -231,7 +221,6 @@ lr .req x30 // link register
* the address
*/
.macro ldr_l, dst, sym, tmp=
-#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
@@ -239,15 +228,6 @@ lr .req x30 // link register
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
-#else
- .ifb \tmp
- adr_l \dst, \sym
- ldr \dst, [\dst]
- .else
- adr_l \tmp, \sym
- ldr \dst, [\tmp]
- .endif
-#endif
.endm
/*
@@ -257,28 +237,18 @@ lr .req x30 // link register
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
-#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
-#else
- adr_l \tmp, \sym
- str \src, [\tmp]
-#endif
.endm
/*
- * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
- * non-module code
+ * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
* @sym: The name of the per-cpu variable
* @tmp: scratch register
*/
.macro adr_this_cpu, dst, sym, tmp
-#ifndef MODULE
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
-#else
- adr_l \dst, \sym
-#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
alternative_else
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index ea9bb4e0e9bb..9bbffc7a301f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -20,8 +20,12 @@
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
+#define CTR_DMINLINE_SHIFT 16
+#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
+#define CTR_IDC_SHIFT 28
+#define CTR_DIC_SHIFT 29
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index bef9f418f089..7dfcec4700fe 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -133,6 +133,9 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void)
{
+ if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
+ return;
+
asm("ic ialluis");
dsb(ish);
}
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index ae852add053d..4f5fd2a36e6e 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -18,7 +18,8 @@
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
-#include <linux/bug.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -196,32 +197,6 @@ __CMPXCHG_GEN(_mb)
__ret; \
})
-/* this_cpu_cmpxchg */
-#define _protect_cmpxchg_local(pcp, o, n) \
-({ \
- typeof(*raw_cpu_ptr(&(pcp))) __ret; \
- preempt_disable(); \
- __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
- preempt_enable(); \
- __ret; \
-})
-
-#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-({ \
- int __ret; \
- preempt_disable(); \
- __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
- raw_cpu_ptr(&(ptr2)), \
- o1, o2, n1, n2); \
- preempt_enable(); \
- __ret; \
-})
-
#define __CMPWAIT_CASE(w, sz, name) \
static inline void __cmpwait_case_##name(volatile void *ptr, \
unsigned long val) \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index bb263820de13..21bb624e0a7a 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -45,7 +45,11 @@
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
#define ARM64_HAS_RAS_EXTN 26
+#define ARM64_WORKAROUND_843419 27
+#define ARM64_HAS_CACHE_IDC 28
+#define ARM64_HAS_CACHE_DIC 29
+#define ARM64_HW_DBM 30
-#define ARM64_NCAPS 27
+#define ARM64_NCAPS 31
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 060e3a4008ab..09b0f2a80c8f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -10,6 +10,7 @@
#define __ASM_CPUFEATURE_H
#include <asm/cpucaps.h>
+#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
@@ -89,24 +90,231 @@ struct arm64_ftr_reg {
extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
-/* scope of capability check */
-enum {
- SCOPE_SYSTEM,
- SCOPE_LOCAL_CPU,
-};
+/*
+ * CPU capabilities:
+ *
+ * We use arm64_cpu_capabilities to represent system features, errata work
+ * arounds (both used internally by kernel and tracked in cpu_hwcaps) and
+ * ELF HWCAPs (which are exposed to user).
+ *
+ * To support systems with heterogeneous CPUs, we need to make sure that we
+ * detect the capabilities correctly on the system and take appropriate
+ * measures to ensure there are no incompatibilities.
+ *
+ * This comment tries to explain how we treat the capabilities.
+ * Each capability has the following list of attributes :
+ *
+ * 1) Scope of Detection : The system detects a given capability by
+ * performing some checks at runtime. This could be, e.g, checking the
+ * value of a field in CPU ID feature register or checking the cpu
+ * model. The capability provides a call back ( @matches() ) to
+ * perform the check. Scope defines how the checks should be performed.
+ * There are three cases:
+ *
+ * a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
+ * matches. This implies, we have to run the check on all the
+ * booting CPUs, until the system decides that state of the
+ * capability is finalised. (See section 2 below)
+ * Or
+ * b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
+ *        match. This implies, we run the check only once, when the
+ * system decides to finalise the state of the capability. If the
+ * capability relies on a field in one of the CPU ID feature
+ * registers, we use the sanitised value of the register from the
+ * CPU feature infrastructure to make the decision.
+ * Or
+ * c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
+ * feature. This category is for features that are "finalised"
+ * (or used) by the kernel very early even before the SMP cpus
+ * are brought up.
+ *
+ * The process of detection is usually denoted by "update" capability
+ * state in the code.
+ *
+ * 2) Finalise the state : The kernel should finalise the state of a
+ * capability at some point during its execution and take necessary
+ * actions if any. Usually, this is done, after all the boot-time
+ * enabled CPUs are brought up by the kernel, so that it can make
+ * better decision based on the available set of CPUs. However, there
+ * are some special cases, where the action is taken during the early
+ * boot by the primary boot CPU. (e.g, running the kernel at EL2 with
+ * Virtualisation Host Extensions). The kernel usually disallows any
+ * changes to the state of a capability once it finalises the capability
+ * and takes any action, as it may be impossible to execute the actions
+ * safely. A CPU brought up after a capability is "finalised" is
+ * referred to as "Late CPU" w.r.t the capability. e.g, all secondary
+ * CPUs are treated "late CPUs" for capabilities determined by the boot
+ * CPU.
+ *
+ * At the moment there are two passes of finalising the capabilities.
+ * a) Boot CPU scope capabilities - Finalised by primary boot CPU via
+ * setup_boot_cpu_capabilities().
+ * b) Everything except (a) - Run via setup_system_capabilities().
+ *
+ * 3) Verification: When a CPU is brought online (e.g, by user or by the
+ * kernel), the kernel should make sure that it is safe to use the CPU,
+ * by verifying that the CPU is compliant with the state of the
+ * capabilities finalised already. This happens via :
+ *
+ * secondary_start_kernel()-> check_local_cpu_capabilities()
+ *
+ * As explained in (2) above, capabilities could be finalised at
+ * different points in the execution. Each newly booted CPU is verified
+ * against the capabilities that have been finalised by the time it
+ * boots.
+ *
+ * a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
+ * except for the primary boot CPU.
+ *
+ * b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
+ * user after the kernel boot are verified against the capability.
+ *
+ * If there is a conflict, the kernel takes an action, based on the
+ * severity (e.g, a CPU could be prevented from booting or cause a
+ * kernel panic). The CPU is allowed to "affect" the state of the
+ * capability, if it has not been finalised already. See section 5
+ * for more details on conflicts.
+ *
+ * 4) Action: As mentioned in (2), the kernel can take an action for each
+ * detected capability, on all CPUs on the system. Appropriate actions
+ * include, turning on an architectural feature, modifying the control
+ * registers (e.g, SCTLR, TCR etc.) or patching the kernel via
+ * alternatives. The kernel patching is batched and performed at later
+ * point. The actions are always initiated only after the capability
+ *    is finalised. This is usually denoted by "enabling" the capability.
+ * The actions are initiated as follows :
+ * a) Action is triggered on all online CPUs, after the capability is
+ * finalised, invoked within the stop_machine() context from
+ *       enable_cpu_capabilities().
+ *
+ * b) Any late CPU, brought up after (1), the action is triggered via:
+ *
+ * check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
+ *
+ * 5) Conflicts: Based on the state of the capability on a late CPU vs.
+ * the system state, we could have the following combinations :
+ *
+ * x-----------------------------x
+ * | Type | System | Late CPU |
+ * |-----------------------------|
+ * | a | y | n |
+ * |-----------------------------|
+ * | b | n | y |
+ * x-----------------------------x
+ *
+ * Two separate flag bits are defined to indicate whether each kind of
+ * conflict can be allowed:
+ * ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
+ * ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
+ *
+ * Case (a) is not permitted for a capability that the system requires
+ * all CPUs to have in order for the capability to be enabled. This is
+ * typical for capabilities that represent enhanced functionality.
+ *
+ * Case (b) is not permitted for a capability that must be enabled
+ * during boot if any CPU in the system requires it in order to run
+ * safely. This is typical for erratum work arounds that cannot be
+ * enabled after the corresponding capability is finalised.
+ *
+ * In some non-typical cases either both (a) and (b), or neither,
+ * should be permitted. This can be described by including neither
+ * or both flags in the capability's type field.
+ */
+
+
+/*
+ * Decide how the capability is detected.
+ * On any local CPU vs System wide vs the primary boot CPU
+ */
+#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
+#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
+/*
+ * The capability is detected on the Boot CPU and is used by the kernel
+ * during early boot. i.e, the capability should be "detected" and
+ * "enabled" as early as possible on all booting CPUs.
+ */
+#define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2))
+#define ARM64_CPUCAP_SCOPE_MASK \
+ (ARM64_CPUCAP_SCOPE_SYSTEM | \
+ ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_SCOPE_BOOT_CPU)
+
+#define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM
+#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
+#define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU
+#define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK
+
+/*
+ * Is it permitted for a late CPU to have this capability when system
+ * hasn't already enabled it ?
+ */
+#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
+/* Is it safe for a late CPU to miss this capability when system has it */
+#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
+
+/*
+ * CPU errata workarounds that need to be enabled at boot time if one or
+ * more CPUs in the system requires it. When one of these capabilities
+ * has been enabled, it is safe to allow any CPU to boot that doesn't
+ * require the workaround. However, it is not safe if a "late" CPU
+ * requires a workaround and the system hasn't enabled it already.
+ */
+#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
+/*
+ * CPU feature detected at boot time based on system-wide value of a
+ * feature. It is safe for a late CPU to have this feature even though
+ * the system hasn't enabled it, although the feature will not be used
+ * by Linux in this case. If the system has enabled this feature already,
+ * then every late CPU must have it.
+ */
+#define ARM64_CPUCAP_SYSTEM_FEATURE \
+ (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+/*
+ * CPU feature detected at boot time based on feature of one or more CPUs.
+ * All possible conflicts for a late CPU are ignored.
+ */
+#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
+ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+
+/*
+ * CPU feature detected at boot time, on one or more CPUs. A late CPU
+ * is not allowed to have the capability when the system doesn't have it.
+ * It is Ok for a late CPU to miss the feature.
+ */
+#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
+
+/*
+ * CPU feature used early in the boot based on the boot CPU. All secondary
+ * CPUs must match the state of the capability as detected by the boot CPU.
+ */
+#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
struct arm64_cpu_capabilities {
const char *desc;
u16 capability;
- int def_scope; /* default scope */
+ u16 type;
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
- int (*enable)(void *); /* Called on all active CPUs */
+ /*
+ * Take the appropriate actions to enable this capability for this CPU.
+ * For each successfully booted CPU, this method is called for each
+ * globally detected capability.
+ */
+ void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
union {
struct { /* To be used for erratum handling only */
- u32 midr_model;
- u32 midr_range_min, midr_range_max;
+ struct midr_range midr_range;
+ const struct arm64_midr_revidr {
+ u32 midr_rv; /* revision/variant */
+ u32 revidr_mask;
+ } * const fixed_revs;
};
+ const struct midr_range *midr_range_list;
struct { /* Feature register checking */
u32 sys_reg;
u8 field_pos;
@@ -115,9 +323,38 @@ struct arm64_cpu_capabilities {
bool sign;
unsigned long hwcap;
};
+ /*
+ * A list of "matches/cpu_enable" pair for the same
+ * "capability" of the same "type" as described by the parent.
+ * Only matches(), cpu_enable() and fields relevant to these
+ * methods are significant in the list. The cpu_enable is
+ * invoked only if the corresponding entry "matches()".
+ * However, if a cpu_enable() method is associated
+ * with multiple matches(), care should be taken that either
+ * the match criteria are mutually exclusive, or that the
+ * method is robust against being called multiple times.
+ */
+ const struct arm64_cpu_capabilities *match_list;
};
};
+static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
+{
+ return cap->type & ARM64_CPUCAP_SCOPE_MASK;
+}
+
+static inline bool
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+}
+
+static inline bool
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+}
+
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
@@ -236,15 +473,8 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
}
void __init setup_cpu_features(void);
-
-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- const char *info);
-void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_capabilities(void);
-void update_cpu_errata_workarounds(void);
-void __init enable_e