Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--  arch/arm64/kernel/entry.S  24
1 file changed, 22 insertions(+), 2 deletions(-)
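
The hunks below call the scs_load and scs_save assembler macros from the newly included <asm/scs.h>, whose definition is not part of this diff. As a rough sketch of the idea only (the TSK_TI_SCS_SP offset and the exact shape of the header are assumptions, not taken from this patch), the macros amount to reloading and stashing the shadow call stack pointer, which lives in x18, from a slot in the task's thread_info:

	/*
	 * Illustrative sketch, not the real <asm/scs.h>: TSK_TI_SCS_SP is an
	 * assumed asm-offsets constant naming a shadow-call-stack pointer
	 * slot in struct thread_info.
	 */
#ifdef CONFIG_SHADOW_CALL_STACK
	.macro scs_load tsk, tmp
	ldr	x18, [\tsk, #TSK_TI_SCS_SP]	// resume this task's shadow stack in x18
	.endm

	.macro scs_save tsk, tmp
	str	x18, [\tsk, #TSK_TI_SCS_SP]	// park x18 before leaving the task's context
	.endm
#else
	.macro scs_load tsk, tmp	// no-ops when SCS is disabled, so the
	.endm				// scs_load/scs_save call sites below
					// need no #ifdef guards of their own
	.macro scs_save tsk, tmp
	.endm
#endif
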
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ddcde093c433..244268d5ae47 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -23,6 +23,7 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
+#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
@@ -179,6 +180,8 @@ alternative_cb_end
apply_ssbd 1, x22, x23
ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
+
+ scs_load tsk, x20
.else
add x21, sp, #S_FRAME_SIZE
get_current_task tsk
@@ -343,6 +346,8 @@ alternative_else_nop_endif
msr cntkctl_el1, x1
4:
#endif
+ scs_save tsk, x0
+
/* No kernel C function calls after this as user keys are set. */
ptrauth_keys_install_user tsk, x0, x1, x2
@@ -388,6 +393,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
.macro irq_stack_entry
mov x19, sp // preserve the original sp
+#ifdef CONFIG_SHADOW_CALL_STACK
+ mov x24, x18 // preserve the original shadow stack
+#endif
/*
* Compare sp with the base of the task stack.
@@ -405,15 +413,25 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
/* switch to the irq stack */
mov sp, x26
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+ /* also switch to the irq shadow stack */
+ adr_this_cpu x18, irq_shadow_call_stack, x26
+#endif
+
9998:
.endm
/*
- * x19 should be preserved between irq_stack_entry and
- * irq_stack_exit.
+ * The callee-saved regs (x19-x29) should be preserved between
+ * irq_stack_entry and irq_stack_exit, but note that kernel_entry
+ * uses x20-x23 to store data for later use.
*/
.macro irq_stack_exit
mov sp, x19
+#ifdef CONFIG_SHADOW_CALL_STACK
+ mov x18, x24
+#endif
.endm
/* GPRs used by entry code */
@@ -901,6 +919,8 @@ SYM_FUNC_START(cpu_switch_to)
mov sp, x9
msr sp_el0, x1
ptrauth_keys_install_kernel x1, 1, x8, x9, x10
+ scs_save x0, x8
+ scs_load x1, x8
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
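
For context on why the entry code has to juggle x18 at all: with Clang's Shadow Call Stack instrumentation, every instrumented non-leaf function spills its return address to the shadow stack addressed by x18 and reloads it from there before returning. A minimal, illustrative sketch of that compiler-emitted prologue/epilogue (not part of this patch, function names are made up) looks roughly like:

some_func:					// hypothetical instrumented non-leaf function
	str	x30, [x18], #8			// push the return address onto the shadow stack
	stp	x29, x30, [sp, #-16]!		// ordinary frame setup still uses the regular stack
	mov	x29, sp
	bl	other_func			// x30 is clobbered here
	ldp	x29, x30, [sp], #16
	ldr	x30, [x18, #-8]!		// pop the trusted copy of the return address
	ret

Because compiled code assumes x18 always points at a valid shadow stack, the hunks above keep it in sync with the ordinary stack: kernel_entry/kernel_exit load and save it per task, irq_stack_entry/irq_stack_exit switch it to the per-cpu irq_shadow_call_stack and restore it from x24, and cpu_switch_to hands it over from the outgoing task to the incoming one.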