Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/Kconfig  69
-rw-r--r--  arch/arm64/Makefile  11
-rw-r--r--  arch/arm64/crypto/aes-ce.S  4
-rw-r--r--  arch/arm64/crypto/aes-modes.S  48
-rw-r--r--  arch/arm64/crypto/aes-neon.S  4
-rw-r--r--  arch/arm64/crypto/ghash-ce-core.S  16
-rw-r--r--  arch/arm64/include/asm/asm_pointer_auth.h  65
-rw-r--r--  arch/arm64/include/asm/assembler.h  16
-rw-r--r--  arch/arm64/include/asm/checksum.h  7
-rw-r--r--  arch/arm64/include/asm/compiler.h  24
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h  8
-rw-r--r--  arch/arm64/include/asm/cpucaps.h  5
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  125
-rw-r--r--  arch/arm64/include/asm/esr.h  2
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h  1
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h  4
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h  9
-rw-r--r--  arch/arm64/include/asm/memory.h  1
-rw-r--r--  arch/arm64/include/asm/mmu.h  10
-rw-r--r--  arch/arm64/include/asm/mmu_context.h  2
-rw-r--r--  arch/arm64/include/asm/page.h  4
-rw-r--r--  arch/arm64/include/asm/perf_event.h  3
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h  50
-rw-r--r--  arch/arm64/include/asm/proc-fns.h  2
-rw-r--r--  arch/arm64/include/asm/processor.h  3
-rw-r--r--  arch/arm64/include/asm/smp.h  12
-rw-r--r--  arch/arm64/include/asm/stackprotector.h  5
-rw-r--r--  arch/arm64/include/asm/sysreg.h  48
-rw-r--r--  arch/arm64/include/asm/topology.h  9
-rw-r--r--  arch/arm64/kernel/Makefile  2
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c  2
-rw-r--r--  arch/arm64/kernel/asm-offsets.c  16
-rw-r--r--  arch/arm64/kernel/cpu-reset.S  2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c  18
-rw-r--r--  arch/arm64/kernel/cpu_ops.c  11
-rw-r--r--  arch/arm64/kernel/cpufeature.c  165
-rw-r--r--  arch/arm64/kernel/cpuidle.c  9
-rw-r--r--  arch/arm64/kernel/entry-common.c  2
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S  48
-rw-r--r--  arch/arm64/kernel/entry.S  121
-rw-r--r--  arch/arm64/kernel/head.S  86
-rw-r--r--  arch/arm64/kernel/hibernate-asm.S  2
-rw-r--r--  arch/arm64/kernel/hyp-stub.S  2
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c  2
-rw-r--r--  arch/arm64/kernel/perf_event.c  338
-rw-r--r--  arch/arm64/kernel/pointer_auth.c  7
-rw-r--r--  arch/arm64/kernel/process.c  5
-rw-r--r--  arch/arm64/kernel/ptrace.c  16
-rw-r--r--  arch/arm64/kernel/relocate_kernel.S  4
-rw-r--r--  arch/arm64/kernel/setup.c  8
-rw-r--r--  arch/arm64/kernel/sleep.S  2
-rw-r--r--  arch/arm64/kernel/smp.c  159
-rw-r--r--  arch/arm64/kernel/stacktrace.c  5
-rw-r--r--  arch/arm64/kernel/topology.c  180
-rw-r--r--  arch/arm64/kernel/vdso/sigreturn.S  4
-rw-r--r--  arch/arm64/kernel/vdso32/sigreturn.S  23
-rw-r--r--  arch/arm64/kvm/hyp-init.S  18
-rw-r--r--  arch/arm64/kvm/hyp.S  4
-rw-r--r--  arch/arm64/kvm/hyp/fpsimd.S  8
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S  27
-rw-r--r--  arch/arm64/kvm/hyp/switch.c  28
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c  8
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c  8
-rw-r--r--  arch/arm64/kvm/sys_regs.c  103
-rw-r--r--  arch/arm64/lib/csum.c  27
-rw-r--r--  arch/arm64/lib/strcmp.S  2
-rw-r--r--  arch/arm64/mm/context.c  32
-rw-r--r--  arch/arm64/mm/mmu.c  379
-rw-r--r--  arch/arm64/mm/proc.S  104
-rw-r--r--  arch/arm64/mm/ptdump_debugfs.c  4
70 files changed, 1908 insertions, 650 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c6c32fb7f546..6e41c4b62607 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -117,6 +117,7 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
@@ -280,6 +281,9 @@ config ZONE_DMA32
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
+config ARCH_ENABLE_MEMORY_HOTREMOVE
+ def_bool y
+
config SMP
def_bool y
@@ -951,11 +955,11 @@ config HOTPLUG_CPU
# Common NUMA Features
config NUMA
- bool "Numa Memory Allocation and Scheduler Support"
+ bool "NUMA Memory Allocation and Scheduler Support"
select ACPI_NUMA if ACPI
select OF_NUMA
help
- Enable NUMA (Non Uniform Memory Access) support.
+ Enable NUMA (Non-Uniform Memory Access) support.
The kernel will try to allocate memory used by a CPU on the
local memory of the CPU and add some more
@@ -1497,6 +1501,9 @@ config ARM64_PTR_AUTH
bool "Enable support for pointer authentication"
default y
depends on !KVM || ARM64_VHE
+ depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+ depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
@@ -1504,16 +1511,72 @@ config ARM64_PTR_AUTH
and other attacks.
This option enables these instructions at EL0 (i.e. for userspace).
-
Choosing this option will cause the kernel to initialise secret keys
for each process at exec() time, with these keys being
context-switched along with the process.
+ If the compiler supports the -mbranch-protection or
+ -msign-return-address flag (e.g. GCC 7 or later), then this option
+ will also cause the kernel itself to be compiled with return address
+ protection. In this case, and if the target hardware is known to
+ support pointer authentication, then CONFIG_STACKPROTECTOR can be
+ disabled with minimal loss of protection.
+
The feature is detected at runtime. If the feature is not present in
hardware it will not be advertised to userspace/KVM guest nor will it
be enabled. However, KVM guests also require VHE mode and hence the
CONFIG_ARM64_VHE=y option to use this feature.
+ If the feature is present on the boot CPU but not on a late CPU, then
+ the late CPU will be parked. Also, if the boot CPU does not have
+	  address auth and the late CPU does, then the late CPU will still boot
+ but with the feature disabled. On such a system, this option should
+ not be selected.
+
+ This feature works with FUNCTION_GRAPH_TRACER option only if
+ DYNAMIC_FTRACE_WITH_REGS is enabled.
+
+config CC_HAS_BRANCH_PROT_PAC_RET
+ # GCC 9 or later, clang 8 or later
+ def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
+
+config CC_HAS_SIGN_RETURN_ADDRESS
+ # GCC 7, 8
+ def_bool $(cc-option,-msign-return-address=all)
+
+config AS_HAS_PAC
+ def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
+
+config AS_HAS_CFI_NEGATE_RA_STATE
+ def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
+
+endmenu
+
+menu "ARMv8.4 architectural features"
+
+config ARM64_AMU_EXTN
+ bool "Enable support for the Activity Monitors Unit CPU extension"
+ default y
+ help
+ The activity monitors extension is an optional extension introduced
+ by the ARMv8.4 CPU architecture. This enables support for version 1
+ of the activity monitors architecture, AMUv1.
+
+ To enable the use of this extension on CPUs that implement it, say Y.
+
+ Note that for architectural reasons, firmware _must_ implement AMU
+ support when running on CPUs that present the activity monitors
+ extension. The required support is present in:
+ * Version 1.5 and later of the ARM Trusted Firmware
+
+ For kernels that have this configuration enabled but boot with broken
+ firmware, you may need to say N here until the firmware is fixed.
+ Otherwise you may experience firmware panics or lockups when
+ accessing the counter registers. Even if you are not observing these
+ symptoms, the values returned by the register reads might not
+ correctly reflect reality. Most commonly, the value read will be 0,
+ indicating that the counter is not enabled.
+
endmenu
menu "ARMv8.5 architectural features"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index dca1a97751ab..f15f92ba53e6 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -65,6 +65,17 @@ stack_protector_prepare: prepare0
include/generated/asm-offsets.h))
endif
+ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
+# -march=armv8.3-a enables the non-NOP-space PAC instructions. To avoid the
+# compiler generating them and consequently breaking the single image contract,
+# we pass it only to the assembler. This option is only needed for
+# non-integrated assemblers.
+branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+KBUILD_CFLAGS += $(branch-prot-flags-y)
+endif
+
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
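As a rough sketch of what the branch-prot-flags above buy for the kernel build (illustrative codegen, not taken from this patch), compiling a function that spills its return address with -mbranch-protection=pac-ret+leaf makes the compiler sign x30 on entry and authenticate it before returning:

/* Illustrative build: gcc -O2 -mbranch-protection=pac-ret+leaf -c caller.c */
extern void callee(void);

void caller(void)
{
        /*
         * The prologue is expected to start with paciasp (sign x30 against sp)
         * and the epilogue to end with autiasp before ret; a tampered saved
         * return address then fails authentication instead of being branched to.
         */
        callee();
}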
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 45062553467f..1dc5bbbfeed2 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -9,8 +9,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(ce_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(ce_ ## func)
.arch armv8-a+crypto
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 8a2faa42b57e..cf618d8f6cec 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -51,7 +51,7 @@ SYM_FUNC_END(aes_decrypt_block5x)
* int blocks)
*/
-AES_ENTRY(aes_ecb_encrypt)
+AES_FUNC_START(aes_ecb_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -79,10 +79,10 @@ ST5( st1 {v4.16b}, [x0], #16 )
.Lecbencout:
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_ecb_encrypt)
+AES_FUNC_END(aes_ecb_encrypt)
-AES_ENTRY(aes_ecb_decrypt)
+AES_FUNC_START(aes_ecb_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -110,7 +110,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
.Lecbdecout:
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_ecb_decrypt)
+AES_FUNC_END(aes_ecb_decrypt)
/*
@@ -126,7 +126,7 @@ AES_ENDPROC(aes_ecb_decrypt)
* u32 const rk2[]);
*/
-AES_ENTRY(aes_essiv_cbc_encrypt)
+AES_FUNC_START(aes_essiv_cbc_encrypt)
ld1 {v4.16b}, [x5] /* get iv */
mov w8, #14 /* AES-256: 14 rounds */
@@ -135,7 +135,7 @@ AES_ENTRY(aes_essiv_cbc_encrypt)
enc_switch_key w3, x2, x6
b .Lcbcencloop4x
-AES_ENTRY(aes_cbc_encrypt)
+AES_FUNC_START(aes_cbc_encrypt)
ld1 {v4.16b}, [x5] /* get iv */
enc_prepare w3, x2, x6
@@ -167,10 +167,10 @@ AES_ENTRY(aes_cbc_encrypt)
.Lcbcencout:
st1 {v4.16b}, [x5] /* return iv */
ret
-AES_ENDPROC(aes_cbc_encrypt)
-AES_ENDPROC(aes_essiv_cbc_encrypt)
+AES_FUNC_END(aes_cbc_encrypt)
+AES_FUNC_END(aes_essiv_cbc_encrypt)
-AES_ENTRY(aes_essiv_cbc_decrypt)
+AES_FUNC_START(aes_essiv_cbc_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -181,7 +181,7 @@ AES_ENTRY(aes_essiv_cbc_decrypt)
encrypt_block cbciv, w8, x6, x7, w9
b .Lessivcbcdecstart
-AES_ENTRY(aes_cbc_decrypt)
+AES_FUNC_START(aes_cbc_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -238,8 +238,8 @@ ST5( st1 {v4.16b}, [x0], #16 )
st1 {cbciv.16b}, [x5] /* return iv */
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_cbc_decrypt)
-AES_ENDPROC(aes_essiv_cbc_decrypt)
+AES_FUNC_END(aes_cbc_decrypt)
+AES_FUNC_END(aes_essiv_cbc_decrypt)
/*
@@ -249,7 +249,7 @@ AES_ENDPROC(aes_essiv_cbc_decrypt)
* int rounds, int bytes, u8 const iv[])
*/
-AES_ENTRY(aes_cbc_cts_encrypt)
+AES_FUNC_START(aes_cbc_cts_encrypt)
adr_l x8, .Lcts_permute_table
sub x4, x4, #16
add x9, x8, #32
@@ -276,9 +276,9 @@ AES_ENTRY(aes_cbc_cts_encrypt)
st1 {v0.16b}, [x4] /* overlapping stores */
st1 {v1.16b}, [x0]
ret
-AES_ENDPROC(aes_cbc_cts_encrypt)
+AES_FUNC_END(aes_cbc_cts_encrypt)
-AES_ENTRY(aes_cbc_cts_decrypt)
+AES_FUNC_START(aes_cbc_cts_decrypt)
adr_l x8, .Lcts_permute_table
sub x4, x4, #16
add x9, x8, #32
@@ -305,7 +305,7 @@ AES_ENTRY(aes_cbc_cts_decrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
st1 {v0.16b}, [x0]
ret
-AES_ENDPROC(aes_cbc_cts_decrypt)
+AES_FUNC_END(aes_cbc_cts_decrypt)
.section ".rodata", "a"
.align 6
@@ -324,7 +324,7 @@ AES_ENDPROC(aes_cbc_cts_decrypt)
* int blocks, u8 ctr[])
*/
-AES_ENTRY(aes_ctr_encrypt)
+AES_FUNC_START(aes_ctr_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -409,7 +409,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
rev x7, x7
ins vctr.d[0], x7
b .Lctrcarrydone
-AES_ENDPROC(aes_ctr_encrypt)
+AES_FUNC_END(aes_ctr_encrypt)
/*
@@ -433,7 +433,7 @@ AES_ENDPROC(aes_ctr_encrypt)
uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s
.endm
-AES_ENTRY(aes_xts_encrypt)
+AES_FUNC_START(aes_xts_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -518,9 +518,9 @@ AES_ENTRY(aes_xts_encrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
mov w4, wzr
b .Lxtsencctsout
-AES_ENDPROC(aes_xts_encrypt)
+AES_FUNC_END(aes_xts_encrypt)
-AES_ENTRY(aes_xts_decrypt)
+AES_FUNC_START(aes_xts_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -612,13 +612,13 @@ AES_ENTRY(aes_xts_decrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
mov w4, wzr
b .Lxtsdecctsout
-AES_ENDPROC(aes_xts_decrypt)
+AES_FUNC_END(aes_xts_decrypt)
/*
* aes_mac_update(u8 const in[], u32 const rk[], int rounds,
* int blocks, u8 dg[], int enc_before, int enc_after)
*/
-AES_ENTRY(aes_mac_update)
+AES_FUNC_START(aes_mac_update)
frame_push 6
mov x19, x0
@@ -676,4 +676,4 @@ AES_ENTRY(aes_mac_update)
ld1 {v0.16b}, [x23] /* get dg */
enc_prepare w21, x20, x0
b .Lmacloop4x
-AES_ENDPROC(aes_mac_update)
+AES_FUNC_END(aes_mac_update)
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 247d34ddaab0..e47d3ec2cfb4 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -8,8 +8,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(neon_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(neon_ ## func)
xtsmask .req v7
cbciv .req v7
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index 084c6a30b03a..6b958dcdf136 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -587,20 +587,20 @@ CPU_LE( rev w8, w8 )
* struct ghash_key const *k, u64 dg[], u8 ctr[],
* int rounds, u8 tag)
*/
-ENTRY(pmull_gcm_encrypt)
+SYM_FUNC_START(pmull_gcm_encrypt)
pmull_gcm_do_crypt 1
-ENDPROC(pmull_gcm_encrypt)
+SYM_FUNC_END(pmull_gcm_encrypt)
/*
* void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
* struct ghash_key const *k, u64 dg[], u8 ctr[],
* int rounds, u8 tag)
*/
-ENTRY(pmull_gcm_decrypt)
+SYM_FUNC_START(pmull_gcm_decrypt)
pmull_gcm_do_crypt 0
-ENDPROC(pmull_gcm_decrypt)
+SYM_FUNC_END(pmull_gcm_decrypt)
-pmull_gcm_ghash_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x)
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57
@@ -681,9 +681,9 @@ pmull_gcm_ghash_4x:
eor XL.16b, XL.16b, T2.16b
ret
-ENDPROC(pmull_gcm_ghash_4x)
+SYM_FUNC_END(pmull_gcm_ghash_4x)
-pmull_gcm_enc_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x)
ld1 {KS0.16b}, [x5] // load upper counter
sub w10, w8, #4
sub w11, w8, #3
@@ -746,7 +746,7 @@ pmull_gcm_enc_4x:
eor INP3.16b, INP3.16b, KS3.16b
ret
-ENDPROC(pmull_gcm_enc_4x)
+SYM_FUNC_END(pmull_gcm_enc_4x)
.section ".rodata", "a"
.align 6
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
new file mode 100644
index 000000000000..ce2a8486992b
--- /dev/null
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ASM_POINTER_AUTH_H
+#define __ASM_ASM_POINTER_AUTH_H
+
+#include <asm/alternative.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * The offset of thread.keys_user.ap* exceeds the #imm offset range of ldp,
+ * so load the base address of thread.keys_user into a register first and
+ * use the smaller thread.keys_user.ap* offsets relative to that base.
+ */
+ .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+ mov \tmp1, #THREAD_KEYS_USER
+ add \tmp1, \tsk, \tmp1
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+ b .Laddr_auth_skip_\@
+alternative_else_nop_endif
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
+ msr_s SYS_APIAKEYLO_EL1, \tmp2
+ msr_s SYS_APIAKEYHI_EL1, \tmp3
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
+ msr_s SYS_APIBKEYLO_EL1, \tmp2
+ msr_s SYS_APIBKEYHI_EL1, \tmp3
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
+ msr_s SYS_APDAKEYLO_EL1, \tmp2
+ msr_s SYS_APDAKEYHI_EL1, \tmp3
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
+ msr_s SYS_APDBKEYLO_EL1, \tmp2
+ msr_s SYS_APDBKEYHI_EL1, \tmp3
+.Laddr_auth_skip_\@:
+alternative_if ARM64_HAS_GENERIC_AUTH
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
+ msr_s SYS_APGAKEYLO_EL1, \tmp2
+ msr_s SYS_APGAKEYHI_EL1, \tmp3
+alternative_else_nop_endif
+ .endm
+
+ .macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ mov \tmp1, #THREAD_KEYS_KERNEL
+ add \tmp1, \tsk, \tmp1
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA]
+ msr_s SYS_APIAKEYLO_EL1, \tmp2
+ msr_s SYS_APIAKEYHI_EL1, \tmp3
+ .if \sync == 1
+ isb
+ .endif
+alternative_else_nop_endif
+ .endm
+
+#else /* CONFIG_ARM64_PTR_AUTH */
+
+ .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+ .endm
+
+ .macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
+ .endm
+
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#endif /* __ASM_ASM_POINTER_AUTH_H */
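The ldp/msr pairs above imply a per-task layout in which each key is a 128-bit value stored low half first. A rough C sketch of that layout (names assumed for illustration; the real structures and the PTRAUTH_* offsets are defined elsewhere in this series):

/* Sketch only: one 128-bit key per algorithm, low half first so a single
 * ldp can load it for the SYS_AP*KEYLO_EL1/SYS_AP*KEYHI_EL1 writes above. */
struct pac_key {
        unsigned long lo;
        unsigned long hi;
};

struct pac_user_keys {
        struct pac_key apia;    /* loaded at PTRAUTH_USER_KEY_APIA */
        struct pac_key apib;
        struct pac_key apda;
        struct pac_key apdb;
        struct pac_key apga;    /* only installed under ARM64_HAS_GENERIC_AUTH */
};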
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index aca337d79d12..0bff325117b4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -257,12 +257,6 @@ alternative_endif
.endm
/*
- * mmid - get context id from mm pointer (mm->context.id)
- */
- .macro mmid, rd, rn
- ldr \rd, [\rn, #MM_CONTEXT_ID]
- .endm
-/*
* read_ctr - read CTR_EL0. If the system has mismatched register fields,
* provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
*/
@@ -431,6 +425,16 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
+ * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
+ */
+ .macro reset_amuserenr_el0, tmpreg
+ mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1
+ ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
+ cbz \tmpreg, .Lskip_\@ // Skip if no AMU present
+ msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0
+.Lskip_\@:
+ .endm
+/*
* copy_page - copy src to dest using temp registers t1-t8
*/
.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
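The new reset_amuserenr_el0 macro gates its AMUSERENR_EL0 write on the AMU field of ID_AA64PFR0_EL1. The same four-bit field test in C (illustrative helper; the field is assumed to sit at bits [47:44], matching ID_AA64PFR0_AMU_SHIFT):

#include <stdbool.h>
#include <stdint.h>

static inline bool id_aa64pfr0_has_amu(uint64_t pfr0)
{
        /* A nonzero AMU field (bits [47:44]) means the extension is present. */
        return ((pfr0 >> 44) & 0xf) != 0;
}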
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 8d2a7de39744..b6f7bc6da5fb 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -5,7 +5,12 @@
#ifndef __ASM_CHECKSUM_H
#define __ASM_CHECKSUM_H
-#include <linux/types.h>
+#include <linux/in6.h>
+
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum sum);
static inline __sum16 csum_fold(__wsum csum)
{
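csum_fold() collapses a 32-bit ones' complement sum into the final 16-bit checksum that csum_ipv6_magic() and friends return. A generic, non-arm64-optimised sketch of that folding step, for illustration only:

#include <stdint.h>

static inline uint16_t fold_csum32(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the high half in */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb any carry */
        return (uint16_t)~sum;                  /* final ones' complement */
}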
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
new file mode 100644
index 000000000000..eece20d2c55f
--- /dev/null
+++ b/arch/arm64/include/asm/compiler.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_COMPILER_H
+#define __ASM_COMPILER_H
+
+#if defined(CONFIG_ARM64_PTR_AUTH)
+
+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code (PAC).
+ * This assumes TBI0/TBI1 are enabled; otherwise bits 63:56 would also be
+ * part of the PAC.
+ */
+#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
+
+/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
+#define ptrauth_clear_pac(ptr) \
+ ((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \
+ (ptr & ~ptrauth_user_pac_mask()))
+
+#define __builtin_return_address(val) \
+ (void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#endif /* __ASM_COMPILER_H */
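ptrauth_clear_pac() uses bit 55 to tell kernel pointers from user pointers and then either sets or clears the PAC field accordingly. A standalone sketch of the user-pointer case, assuming a 48-bit VA so the PAC occupies bits 54:48 (the real mask depends on vabits_actual):

#include <stdint.h>

#define VA_BITS         48
#define USER_PAC_MASK   (((1ULL << (55 - VA_BITS)) - 1) << VA_BITS)  /* bits 54:48 */

static inline uint64_t clear_user_pac(uint64_t ptr)
{
        return ptr & ~USER_PAC_MASK;    /* user pointers: PAC bits cleared to 0 */
}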
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 86aabf1e0199..d28e8f37d3b4 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -55,12 +55,12 @@ struct cpu_operations {
#endif
};
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
-int __init cpu_read_ops(int cpu);
+int __init init_cpu_ops(int cpu);
+extern const struct cpu_operations *get_cpu_ops(int cpu);
-static inline void __init cpu_read_bootcpu_ops(void)
+static inline void __init init_bootcpu_ops(void)
{
- cpu_read_ops(0);
<