Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/asm-offsets.c     3
-rw-r--r--  arch/arm64/kernel/cpufeature.c      3
-rw-r--r--  arch/arm64/kernel/entry.S          41
-rw-r--r--  arch/arm64/kernel/head.S            2
-rw-r--r--  arch/arm64/kernel/hibernate.c       5
-rw-r--r--  arch/arm64/kernel/image-vars.h      2
-rw-r--r--  arch/arm64/kernel/kaslr.c           3
-rw-r--r--  arch/arm64/kernel/module.c          6
-rw-r--r--  arch/arm64/kernel/mte.c           118
-rw-r--r--  arch/arm64/kernel/setup.c           2
-rw-r--r--  arch/arm64/kernel/sleep.S           2
-rw-r--r--  arch/arm64/kernel/smp.c             2
12 files changed, 168 insertions(+), 21 deletions(-)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5e82488f1b82..f42fd9e33981 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -47,6 +47,9 @@ int main(void)
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
+#ifdef CONFIG_ARM64_MTE
+ DEFINE(THREAD_GCR_EL1_USER, offsetof(struct task_struct, thread.gcr_user_excl));
+#endif
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
DEFINE(S_X2, offsetof(struct pt_regs, regs[2]));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d87cfc6246e0..7ffb5f1d8b68 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -70,6 +70,7 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/cpu.h>
+#include <linux/kasan.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -1710,6 +1711,8 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
cleared_zero_page = true;
mte_clear_page_tags(lm_alias(empty_zero_page));
}
+
+ kasan_init_hw_tags_cpu();
}
#endif /* CONFIG_ARM64_MTE */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 51c762156099..2a93fa5f4e49 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -173,6 +173,43 @@ alternative_else_nop_endif
#endif
.endm
+ .macro mte_set_gcr, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+ /*
+ * Calculate and set the exclude mask preserving
+ * the RRND (bit[16]) setting.
+ */
+ mrs_s \tmp2, SYS_GCR_EL1
+ bfi \tmp2, \tmp, #0, #16
+ msr_s SYS_GCR_EL1, \tmp2
+ isb
+#endif
+ .endm
+
+ .macro mte_set_kernel_gcr, tmp, tmp2
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_if_not ARM64_MTE
+ b 1f
+alternative_else_nop_endif
+ ldr_l \tmp, gcr_kernel_excl
+
+ mte_set_gcr \tmp, \tmp2
+1:
+#endif
+ .endm
+
+ .macro mte_set_user_gcr, tsk, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+alternative_if_not ARM64_MTE
+ b 1f
+alternative_else_nop_endif
+ ldr \tmp, [\tsk, #THREAD_GCR_EL1_USER]
+
+ mte_set_gcr \tmp, \tmp2
+1:
+#endif
+ .endm
+
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
@@ -212,6 +249,8 @@ alternative_else_nop_endif
ptrauth_keys_install_kernel tsk, x20, x22, x23
+ mte_set_kernel_gcr x22, x23
+
scs_load tsk, x20
.else
add x21, sp, #S_FRAME_SIZE
@@ -315,6 +354,8 @@ alternative_else_nop_endif
/* No kernel C function calls after this as user keys are set. */
ptrauth_keys_install_user tsk, x0, x1, x2
+ mte_set_user_gcr tsk, x0, x1
+
apply_ssbd 0, x0, x1
.endif
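
For reference, the BFI in the mte_set_gcr macro above replaces only GCR_EL1
bits [15:0] (the Exclude field), leaving RRND at bit [16] and everything
above it untouched. A minimal C sketch of that register update follows; it
is illustrative only and not part of this patch (the constant names mirror
arch/arm64/include/asm/sysreg.h):

    #include <stdint.h>

    #define SYS_GCR_EL1_EXCL_MASK  0xffffUL     /* Exclude field, bits [15:0] */
    #define SYS_GCR_EL1_RRND       (1UL << 16)  /* RRND, preserved by the macro */

    /* Model of "bfi \tmp2, \tmp, #0, #16": insert excl into bits [15:0]. */
    static inline uint64_t gcr_with_excl(uint64_t gcr_el1, uint64_t excl)
    {
            gcr_el1 &= ~SYS_GCR_EL1_EXCL_MASK;
            gcr_el1 |= excl & SYS_GCR_EL1_EXCL_MASK;
            return gcr_el1;     /* RRND and all higher bits unchanged */
    }
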
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 42b23ce679dc..a0dc987724ed 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
bl __pi_memset
dsb ishst // Make zero page visible to PTW
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bl kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 42003774d261..9c9f47e9f7f4 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -371,6 +371,11 @@ static void swsusp_mte_restore_tags(void)
unsigned long pfn = xa_state.xa_index;
struct page *page = pfn_to_online_page(pfn);
+ /*
+ * It is not required to invoke page_kasan_tag_reset(page)
+ * at this point since the tags stored in page->flags are
+ * already restored.
+ */
mte_restore_page_tags(page_address(page), tags);
mte_free_tag_storage(tags);
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 39289d75118d..f676243abac6 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -37,7 +37,7 @@ __efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
__efistub___memcpy = __pi_memcpy;
__efistub___memmove = __pi_memmove;
__efistub___memset = __pi_memset;
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 0921aa1520b0..1c74c45b9494 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -161,7 +161,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
- if (IS_ENABLED(CONFIG_KASAN))
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+ IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/*
* KASAN does not expect the module region to intersect the
* vmalloc region, since shadow memory is allocated for each
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 2a1ad95d9b2c..fe21e0f06492 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -30,7 +30,8 @@ void *module_alloc(unsigned long size)
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
gfp_mask |= __GFP_NOWARN;
- if (IS_ENABLED(CONFIG_KASAN))
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+ IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/* don't exceed the static module region - see below */
module_alloc_end = MODULES_END;
@@ -39,7 +40,8 @@ void *module_alloc(unsigned long size)
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- !IS_ENABLED(CONFIG_KASAN))
+ !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/*
* KASAN can only deal with module allocations being served
* from the reserved module region, since the remainder of
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index ef15c8a2a49d..dc9ada64feed 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -13,13 +13,18 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
+#include <linux/types.h>
#include <linux/uio.h>
+#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
+#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
+u64 gcr_kernel_excl __ro_after_init;
+
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
@@ -31,6 +36,15 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
return;
}
+ page_kasan_tag_reset(page);
+ /*
+ * We need smp_wmb() in between setting the flags and clearing the
+ * tags because if another thread reads page->flags and builds a
+ * tagged address out of it, there is an address dependency ordering
+ * the memory access; on the current thread, however, nothing
+ * guarantees that the new page->flags are visible before the tags
+ * are updated.
+ */
+ smp_wmb();
mte_clear_page_tags(page_address(page));
}
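
The pairing that the new comment describes can be sketched as two sides;
this is an illustrative model only (it assumes the reader builds its
pointer from the tag in page->flags, as tag-based KASAN does), not code
added by this patch:

    #include <linux/mm.h>       /* page_kasan_tag{,_reset}(), page_address() */
    #include <asm/barrier.h>    /* smp_wmb() */
    #include <asm/memory.h>     /* __tag_set() */
    #include <asm/mte.h>        /* mte_clear_page_tags() */

    /* Writer (this CPU): order the flags store before the tag stores. */
    static void writer(struct page *page)
    {
            page_kasan_tag_reset(page);                 /* store to page->flags */
            smp_wmb();                                  /* flags visible first */
            mte_clear_page_tags(page_address(page));    /* stores to tag memory */
    }

    /* Reader (another CPU): the address dependency does the ordering. */
    static void *reader(struct page *page)
    {
            u8 tag = page_kasan_tag(page);              /* load from page->flags */

            /*
             * An access through a pointer built from 'tag' cannot be
             * reordered before the page->flags load, so no barrier is
             * needed on this side.
             */
            return (void *)__tag_set(page_address(page), tag);
    }

Without the smp_wmb(), another CPU could observe the freshly cleared tags
in memory while still reading the stale tag from page->flags, build a
mismatched tagged pointer and take a tag check fault.
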
@@ -72,6 +86,78 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret;
}
+u8 mte_get_mem_tag(void *addr)
+{
+ if (!system_supports_mte())
+ return 0xFF;
+
+ asm(__MTE_PREAMBLE "ldg %0, [%0]"
+ : "+r" (addr));
+
+ return mte_get_ptr_tag(addr);
+}
+
+u8 mte_get_random_tag(void)
+{
+ void *addr;
+
+ if (!system_supports_mte())
+ return 0xFF;
+
+ asm(__MTE_PREAMBLE "irg %0, %0"
+ : "+r" (addr));
+
+ return mte_get_ptr_tag(addr);
+}
+
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+ void *ptr = addr;
+
+ if ((!system_supports_mte()) || (size == 0))
+ return addr;
+
+ /* Make sure that size is MTE granule aligned. */
+ WARN_ON(size & (MTE_GRANULE_SIZE - 1));
+
+ /* Make sure that the address is MTE granule aligned. */
+ WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
+
+ tag = 0xF0 | tag;
+ ptr = (void *)__tag_set(ptr, tag);
+
+ mte_assign_mem_tag_range(ptr, size);
+
+ return ptr;
+}
+
+void mte_init_tags(u64 max_tag)
+{
+ static bool gcr_kernel_excl_initialized;
+
+ if (!gcr_kernel_excl_initialized) {
+ /*
+ * KASAN tags are 8 bits wide (e.g. 0xFF) while MTE tags are 4 bits
+ * (e.g. 0xF). This conversion extracts the MTE tag from a KASAN tag.
+ */
+ u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
+ max_tag), 0);
+
+ gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+ gcr_kernel_excl_initialized = true;
+ }
+
+ /* Enable the kernel exclude mask for random tags generation. */
+ write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
+}
+
+void mte_enable_kernel(void)
+{
+ /* Enable MTE Sync Mode for EL1. */
+ sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+ isb();
+}
+
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
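
To make the conversion in mte_init_tags() concrete, here is the value flow
for the max_tag that HW tag-based KASAN is expected to pass down (0xFE,
KASAN's maximum 8-bit tag, is an assumption for this example):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative userspace recomputation of gcr_kernel_excl. */
    int main(void)
    {
            uint64_t max_tag = 0xFE;
            uint64_t mte_tag = max_tag & 0xF;            /* FIELD_GET       -> 0xE    */
            uint64_t incl = (1UL << (mte_tag + 1)) - 1;  /* GENMASK(0xE, 0) -> 0x7fff */
            uint64_t excl = ~incl & 0xFFFF;              /* & EXCL_MASK     -> 0x8000 */

            /*
             * Only tag 15 is excluded: IRG may generate kernel tags 0..14,
             * keeping MTE tag 0xF (KASAN tag 0xFF, the match-all tag) out
             * of circulation.
             */
            printf("incl=%#llx excl=%#llx\n",
                   (unsigned long long)incl, (unsigned long long)excl);
            return 0;
    }
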
@@ -92,23 +178,26 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
preempt_enable();
}
-static void update_gcr_el1_excl(u64 incl)
+static void update_gcr_el1_excl(u64 excl)
{
- u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
/*
- * Note that 'incl' is an include mask (controlled by the user via
- * prctl()) while GCR_EL1 accepts an exclude mask.
+ * Note that the mask controlled by the user via prctl() is an
+ * include while GCR_EL1 accepts an exclude mask.
* No need for ISB since this only affects EL0 currently, implicit
* with ERET.
*/
sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}
-static void set_gcr_el1_excl(u64 incl)
+static void set_gcr_el1_excl(u64 excl)
{
- current->thread.gcr_user_incl = incl;
- update_gcr_el1_excl(incl);
+ current->thread.gcr_user_excl = excl;
+
+ /*
+ * SYS_GCR_EL1 will be set to the current->thread.gcr_user_excl
+ * value by mte_set_user_gcr() in kernel_exit.
+ */
}
void flush_mte_state(void)
@@ -123,7 +212,7 @@ void flush_mte_state(void)
/* disable tag checking */
set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */
- set_gcr_el1_excl(0);
+ set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}
void mte_thread_switch(struct task_struct *next)
@@ -134,7 +223,6 @@ void mte_thread_switch(struct task_struct *next)
/* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
- update_gcr_el1_excl(next->thread.gcr_user_incl);
}
void mte_suspend_exit(void)
@@ -142,13 +230,14 @@ void mte_suspend_exit(void)
if (!system_supports_mte())
return;
- update_gcr_el1_excl(current->thread.gcr_user_incl);
+ update_gcr_el1_excl(gcr_kernel_excl);
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 tcf0;
- u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+ u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
@@ -169,10 +258,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
if (task != current) {
task->thread.sctlr_tcf0 = tcf0;
- task->thread.gcr_user_incl = gcr_incl;
+ task->thread.gcr_user_excl = gcr_excl;
} else {
set_sctlr_el1_tcf0(tcf0);
- set_gcr_el1_excl(gcr_incl);
+ set_gcr_el1_excl(gcr_excl);
}
return 0;
@@ -181,11 +270,12 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
long get_mte_ctrl(struct task_struct *task)
{
unsigned long ret;
+ u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
- ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+ ret = incl << PR_MTE_TAG_SHIFT;
switch (task->thread.sctlr_tcf0) {
case SCTLR_EL1_TCF0_NONE:
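
The prctl() ABI is unchanged by the switch to storing an exclude mask:
userspace still passes an include mask, set_mte_ctrl() complements it into
gcr_user_excl, and get_mte_ctrl() converts it back. A minimal, hypothetical
userspace sketch (the guarded constants mirror the upstream uapi values):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_MTE_TAG_SHIFT
    # define PR_SET_TAGGED_ADDR_CTRL 55
    # define PR_GET_TAGGED_ADDR_CTRL 56
    # define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
    # define PR_MTE_TCF_SYNC         (1UL << 1)
    # define PR_MTE_TAG_SHIFT        3
    #endif

    int main(void)
    {
            /* Include tags 0-3; the kernel stores ~0xf & 0xffff in gcr_user_excl. */
            unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                                 (0xfUL << PR_MTE_TAG_SHIFT);

            if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
                    perror("PR_SET_TAGGED_ADDR_CTRL");

            /* get_mte_ctrl() reports the tag bits as an include mask again. */
            printf("ctrl = 0x%lx\n",
                   (unsigned long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
            return 0;
    }
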
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index c44eb4b80163..c18aacde8bb0 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -358,7 +358,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
smp_build_mpidr_hash();
/* Init percpu seeds for random tags after cpus are set up. */
- kasan_init_tags();
+ kasan_init_sw_tags();
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 4be7f7eed875..6bdef7362c0e 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume)
*/
bl cpu_do_resume
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
mov x0, sp
bl kasan_unpoison_task_stack_below
#endif
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2499b895efea..19b1705ae5cb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -462,6 +462,8 @@ void __init smp_prepare_boot_cpu(void)
/* Conditionally switch to GIC PMR for interrupt masking */
if (system_uses_irq_prio_masking())
init_gic_priority_masking();
+
+ kasan_init_hw_tags();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)