From e9a2f8b599d0bc22a1b13e69527246ac39c697b4 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sun, 11 Oct 2020 10:20:16 +0100 Subject: ARM: 9011/1: centralize phys-to-virt conversion of DT/ATAGS address Before moving the DT mapping out of the linear region, let's prepare for this change by removing all the phys-to-virt translations of the __atags_pointer variable, and perform this translation only once at setup time. Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Nicolas Pitre Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King --- arch/arm/include/asm/prom.h | 4 ++-- arch/arm/kernel/atags.h | 4 ++-- arch/arm/kernel/atags_parse.c | 6 +++--- arch/arm/kernel/devtree.c | 6 +++--- arch/arm/kernel/setup.c | 14 +++++++++----- arch/arm/mm/mmu.c | 4 ++-- 6 files changed, 21 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index 1e36c40533c1..402e3f34c7ed 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -9,12 +9,12 @@ #ifdef CONFIG_OF -extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys); +extern const struct machine_desc *setup_machine_fdt(void *dt_virt); extern void __init arm_dt_init_cpu_maps(void); #else /* CONFIG_OF */ -static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys) +static inline const struct machine_desc *setup_machine_fdt(void *dt_virt) { return NULL; } diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h index 067e12edc341..f2819c25b602 100644 --- a/arch/arm/kernel/atags.h +++ b/arch/arm/kernel/atags.h @@ -2,11 +2,11 @@ void convert_to_tag_list(struct tag *tags); #ifdef CONFIG_ATAGS -const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer, +const struct machine_desc *setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr); #else static inline const struct machine_desc * __init __noreturn -setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr) +setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr) { early_print("no ATAGS support: can't continue\n"); while (true); diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index 6c12d9fe694e..373b61f9a4f0 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -174,7 +174,7 @@ static void __init squash_mem_tags(struct tag *tag) } const struct machine_desc * __init -setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr) +setup_machine_tags(void *atags_vaddr, unsigned int machine_nr) { struct tag *tags = (struct tag *)&default_tags; const struct machine_desc *mdesc = NULL, *p; @@ -195,8 +195,8 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr) if (!mdesc) return NULL; - if (__atags_pointer) - tags = phys_to_virt(__atags_pointer); + if (atags_vaddr) + tags = atags_vaddr; else if (mdesc->atag_offset) tags = (void *)(PAGE_OFFSET + mdesc->atag_offset); diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 7f0745a97e20..28311dd0fee6 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -203,12 +203,12 @@ static const void * __init arch_get_next_mach(const char *const **match) /** * setup_machine_fdt - Machine setup when an dtb was passed to the kernel - * @dt_phys: physical address of dt blob + * @dt_virt: virtual address of dt blob * * If a dtb was passed to the kernel in r2, then use it to choose the * correct machine_desc and to setup the system. 
*/ -const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) +const struct machine_desc * __init setup_machine_fdt(void *dt_virt) { const struct machine_desc *mdesc, *mdesc_best = NULL; @@ -221,7 +221,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) mdesc_best = &__mach_desc_GENERIC_DT; #endif - if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys))) + if (!dt_virt || !early_init_dt_verify(dt_virt)) return NULL; mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f65d0ac9f63..306bcd9844be 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -89,6 +89,7 @@ unsigned int cacheid __read_mostly; EXPORT_SYMBOL(cacheid); unsigned int __atags_pointer __initdata; +void *atags_vaddr __initdata; unsigned int system_rev; EXPORT_SYMBOL(system_rev); @@ -1081,19 +1082,22 @@ void __init hyp_mode_check(void) void __init setup_arch(char **cmdline_p) { - const struct machine_desc *mdesc; + const struct machine_desc *mdesc = NULL; + + if (__atags_pointer) + atags_vaddr = phys_to_virt(__atags_pointer); setup_processor(); - mdesc = setup_machine_fdt(__atags_pointer); + if (atags_vaddr) + mdesc = setup_machine_fdt(atags_vaddr); if (!mdesc) - mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type); + mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type); if (!mdesc) { early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n"); early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type, __atags_pointer); if (__atags_pointer) - early_print(" r2[]=%*ph\n", 16, - phys_to_virt(__atags_pointer)); + early_print(" r2[]=%*ph\n", 16, atags_vaddr); dump_machine_table(); } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ab69250a86bc..55991fe60054 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1489,7 +1489,7 @@ static void __init map_lowmem(void) } #ifdef CONFIG_ARM_PV_FIXUP -extern unsigned long __atags_pointer; +extern void *atags_vaddr; typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata); pgtables_remap lpae_pgtables_remap_asm; @@ -1520,7 +1520,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc) */ lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm); pa_pgd = __pa(swapper_pg_dir); - boot_data = __va(__atags_pointer); + boot_data = atags_vaddr; barrier(); pr_info("Switching physical address space to 0x%08llx\n", -- cgit v1.2.3 From 7a1be318f5795cb66fa0dc86b3ace427fe68057f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sun, 11 Oct 2020 10:21:37 +0100 Subject: ARM: 9012/1: move device tree mapping out of linear region On ARM, setting up the linear region is tricky, given the constraints around placement and alignment of the memblocks, and how the kernel itself as well as the DT are placed in physical memory. Let's simplify matters a bit, by moving the device tree mapping to the top of the address space, right between the end of the vmalloc region and the start of the fixmap region, and create a read-only mapping for it that is independent of the size of the linear region, and how it is organized. Since this region was formerly used as a guard region, which will now be populated fully on LPAE builds by this read-only mapping (which will still be able to function as a guard region for stray writes), bump the start of the [underutilized] fixmap region by 512 KB as well, to ensure that there is always a proper guard region here.
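For illustration, the values from the hunks below give roughly the following layout at the top of the 32-bit address space (the LPAE case, where PMD_SIZE is 2 MiB, so the 2 * PMD_SIZE window fills the former 4 MiB guard region exactly):

  0xff800000  FDT_FIXED_BASE: start of the read-only FDT window (FDT_FIXED_SIZE = 2 * PMD_SIZE)
  0xffc00000  end of the FDT window; former FIXADDR_START, now the start of a 512 KB guard
  0xffc80000  new FIXADDR_START: start of the fixmap region
  0xfff00000  FIXADDR_END

As a worked example of the FDT_VIRT_ADDR() macro introduced below, a DT blob at the hypothetical physical address 0x40300000 would then be read through FDT_VIRT_ADDR(0x40300000) == (void *)(0xff800000 | (0x40300000 % PMD_SIZE)) == (void *)0xff900000.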
Doing so still leaves ample room for the fixmap space, even with NR_CPUS set to its maximum value of 32. Tested-by: Linus Walleij Reviewed-by: Linus Walleij Reviewed-by: Nicolas Pitre Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King --- arch/arm/include/asm/fixmap.h | 2 +- arch/arm/include/asm/memory.h | 5 +++++ arch/arm/kernel/head.S | 5 ++--- arch/arm/kernel/setup.c | 11 ++++++++--- arch/arm/mm/init.c | 1 - arch/arm/mm/mmu.c | 20 ++++++++++++++------ arch/arm/mm/pv-fixup-asm.S | 4 ++-- 7 files changed, 32 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index fc56fc3e1931..9575b404019c 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -2,7 +2,7 @@ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H -#define FIXADDR_START 0xffc00000UL +#define FIXADDR_START 0xffc80000UL #define FIXADDR_END 0xfff00000UL #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 99035b5891ef..bb79e52aeb90 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -67,6 +67,10 @@ */ #define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) +#define FDT_FIXED_BASE UL(0xff800000) +#define FDT_FIXED_SIZE (2 * PMD_SIZE) +#define FDT_VIRT_ADDR(physaddr) ((void *)(FDT_FIXED_BASE | (physaddr) % PMD_SIZE)) + #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) /* * Allow 16MB-aligned ioremap pages @@ -107,6 +111,7 @@ extern unsigned long vectors_base; #define MODULES_VADDR PAGE_OFFSET #define XIP_VIRT_ADDR(physaddr) (physaddr) +#define FDT_VIRT_ADDR(physaddr) ((void *)(physaddr)) #endif /* !CONFIG_MMU */ diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index f8904227e7fd..9b18d8c66129 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -275,9 +275,8 @@ __create_page_tables: */ mov r0, r2, lsr #SECTION_SHIFT movs r0, r0, lsl #SECTION_SHIFT - subne r3, r0, r8 - addne r3, r3, #PAGE_OFFSET - addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) + ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER) + addne r3, r3, r4 orrne r6, r7, r0 strne r6, [r3], #1 << PMD_ORDER addne r6, r6, #1 << SECTION_SHIFT diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 306bcd9844be..694aa6b4bd03 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -89,7 +90,6 @@ unsigned int cacheid __read_mostly; EXPORT_SYMBOL(cacheid); unsigned int __atags_pointer __initdata; -void *atags_vaddr __initdata; unsigned int system_rev; EXPORT_SYMBOL(system_rev); @@ -1083,13 +1083,18 @@ void __init hyp_mode_check(void) void __init setup_arch(char **cmdline_p) { const struct machine_desc *mdesc = NULL; + void *atags_vaddr = NULL; if (__atags_pointer) - atags_vaddr = phys_to_virt(__atags_pointer); + atags_vaddr = FDT_VIRT_ADDR(__atags_pointer); setup_processor(); - if (atags_vaddr) + if (atags_vaddr) { mdesc = setup_machine_fdt(atags_vaddr); + if (mdesc) + memblock_reserve(__atags_pointer, + fdt_totalsize(atags_vaddr)); + } if (!mdesc) mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type); if (!mdesc) { diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d57112a276f5..a391804c7ce3 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -223,7 +223,6 @@ void __init arm_memblock_init(const struct machine_desc *mdesc) if (mdesc->reserve) mdesc->reserve(); - early_init_fdt_reserve_self(); 
early_init_fdt_scan_reserved_mem(); /* reserve memory for DMA contiguous allocations */ diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 55991fe60054..fa259825310c 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -39,6 +39,8 @@ #include "mm.h" #include "tcm.h" +extern unsigned long __atags_pointer; + /* * empty_zero_page is a special page that is used for * zero-initialized data and COW. @@ -946,7 +948,7 @@ static void __init create_mapping(struct map_desc *md) return; } - if ((md->type == MT_DEVICE || md->type == MT_ROM) && + if (md->type == MT_DEVICE && md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n", @@ -1333,6 +1335,15 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); + if (__atags_pointer) { + /* create a read-only mapping of the device tree */ + map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); + map.virtual = FDT_FIXED_BASE; + map.length = FDT_FIXED_SIZE; + map.type = MT_ROM; + create_mapping(&map); + } + /* * Map the kernel if it is XIP. * It is always first in the modulearea. @@ -1489,8 +1500,7 @@ static void __init map_lowmem(void) } #ifdef CONFIG_ARM_PV_FIXUP -extern void *atags_vaddr; -typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata); +typedef void pgtables_remap(long long offset, unsigned long pgd); pgtables_remap lpae_pgtables_remap_asm; /* @@ -1503,7 +1513,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc) unsigned long pa_pgd; unsigned int cr, ttbcr; long long offset; - void *boot_data; if (!mdesc->pv_fixup) return; @@ -1520,7 +1529,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc) */ lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm); pa_pgd = __pa(swapper_pg_dir); - boot_data = atags_vaddr; barrier(); pr_info("Switching physical address space to 0x%08llx\n", @@ -1556,7 +1564,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc) * needs to be assembly. It's fairly simple, as we're using the * temporary tables setup by the initial assembly code. */ - lpae_pgtables_remap(offset, pa_pgd, boot_data); + lpae_pgtables_remap(offset, pa_pgd); /* Re-enable the caches and cacheable TLB walks */ asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr)); diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S index 8eade0416739..5c5e1952000a 100644 --- a/arch/arm/mm/pv-fixup-asm.S +++ b/arch/arm/mm/pv-fixup-asm.S @@ -39,8 +39,8 @@ ENTRY(lpae_pgtables_remap_asm) /* Update level 2 entries for the boot data */ add r7, r2, #0x1000 - add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER - bic r7, r7, #(1 << L2_ORDER) - 1 + movw r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER) + add r7, r7, r3 ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 -- cgit v1.2.3 From d5d44e7e3507b0ad868f68e0c5bca6a57afa1b8b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 25 Oct 2020 23:50:09 +0100 Subject: ARM: 9013/2: Disable KASan instrumentation for some code Disable instrumentation for arch/arm/boot/compressed/* since that code is executed before the kernel has even set up its mappings and is definitely out of scope for KASan. Disable instrumentation of arch/arm/vdso/* because that code is not linked with the kernel image, so the KASan management code would fail to link.
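To make the link-failure argument concrete: with CONFIG_KASAN the compiler wraps every memory access in a call into the KASan runtime, roughly as in this illustrative sketch (the real ABI has one helper per access width, e.g. __asan_load4() for 4-byte loads):

  /* provided by the KASan runtime in mm/kasan/, never linked into the vDSO */
  void __asan_load4(unsigned long addr);

  int read_int(int *p)
  {
          __asan_load4((unsigned long)p);  /* shadow check inserted by the compiler */
          return *p;
  }

Neither the decompressor nor the vDSO is linked against the KASan runtime, so any such emitted call would be an unresolved symbol; hence instrumentation has to be switched off for both.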
Disable instrumentation of arch/arm/mm/physaddr.c. See commit ec6d06efb0ba ("arm64: Add support for CONFIG_DEBUG_VIRTUAL") for more details. Disable the KASan check in the function unwind_pop_register() because it does not matter if a KASan check fails when unwind_pop_register() reads the stack memory of a task. Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: kasan-dev@googlegroups.com Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel # QEMU/KVM/mach-virt/LPAE/8G Tested-by: Florian Fainelli # Brahma SoCs Tested-by: Ahmad Fatoum # i.MX6Q Reported-by: Florian Fainelli Reported-by: Marc Zyngier Signed-off-by: Abbott Liu Signed-off-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/boot/compressed/Makefile | 1 + arch/arm/kernel/unwind.c | 6 +++++- arch/arm/mm/Makefile | 2 ++ arch/arm/vdso/Makefile | 2 ++ 4 files changed, 10 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 47f001ca5499..a815b1ae990d 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -24,6 +24,7 @@ OBJS += hyp-stub.o endif GCOV_PROFILE := n +KASAN_SANITIZE := n # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. KCOV_INSTRUMENT := n diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index d2bd0df2318d..f35eb584a18a 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -236,7 +236,11 @@ static int unwind_pop_register(struct unwind_ctrl_block *ctrl, if (*vsp >= (unsigned long *)ctrl->sp_high) return -URC_FAILURE; - ctrl->vrs[reg] = *(*vsp)++; + /* Use READ_ONCE_NOCHECK here to avoid this memory access + * from being tracked by KASAN. + */ + ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp)); + (*vsp)++; return URC_OK; } diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 7cb1699fbfc4..99699c32d8a5 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -7,6 +7,7 @@ obj-y := extable.o fault.o init.o iomap.o obj-y += dma-mapping$(MMUEXT).o obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ mmap.o pgd.o mmu.o pageattr.o +KASAN_SANITIZE_mmu.o := n ifneq ($(CONFIG_MMU),y) obj-y += nommu.o @@ -16,6 +17,7 @@ endif obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o obj-$(CONFIG_ARM_PTDUMP_DEBUGFS) += ptdump_debugfs.o obj-$(CONFIG_MODULES) += proc-syms.o +KASAN_SANITIZE_physaddr.o := n obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile index 150ce6e6a5d3..b558bee0e1f6 100644 --- a/arch/arm/vdso/Makefile +++ b/arch/arm/vdso/Makefile @@ -42,6 +42,8 @@ GCOV_PROFILE := n # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. KCOV_INSTRUMENT := n +KASAN_SANITIZE := n + # Force dependency $(obj)/vdso.o : $(obj)/vdso.so -- cgit v1.2.3 From d6d51a96c7d63b7450860a3037f2d62388286a52 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 25 Oct 2020 23:52:08 +0100 Subject: ARM: 9014/2: Replace string mem* functions for KASan Functions like memset()/memmove()/memcpy() do a lot of memory accesses. If a bad pointer is passed to one of these functions it is important to catch this. Compiler instrumentation cannot do this since these functions are written in assembly. KASan replaces these memory functions with instrumented variants. The original functions are declared as weak symbols so that the strong definitions in mm/kasan/kasan.c can replace them.
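A minimal sketch of that replacement mechanism, and of the '__' aliases described next (simplified; the kernel's real instrumented definitions also record the caller address for bug reports):

  /* arch/arm/lib keeps the real, uninstrumented routine reachable as __memcpy() */
  void *__memcpy(void *dest, const void *src, size_t n);

  /* the KASan core supplies the strong definition, overriding the weak memcpy() */
  void *memcpy(void *dest, const void *src, size_t n)
  {
          kasan_check_read(src, n);      /* validate both ranges against shadow memory */
          kasan_check_write(dest, n);
          return __memcpy(dest, src, n);
  }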
The original functions have aliases with a '__' prefix in their name, so we can call the non-instrumented variant if needed. We must use __memcpy()/__memset() in place of memcpy()/memset() when we copy .data to RAM and when we clear .bss, because kasan_early_init cannot be called before the initialization of .data and .bss. For the kernel compression and EFI libstub's custom string libraries we need a special quirk: even if these are built without KASan enabled, they rely on the global headers for their custom string libraries, which means that e.g. memcpy() will be defined to __memcpy() and we get link failures. Since these implementations are written in C rather than assembly we use e.g. __alias(memcpy) to redirect any users back to the local implementation. Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: kasan-dev@googlegroups.com Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel # QEMU/KVM/mach-virt/LPAE/8G Tested-by: Florian Fainelli # Brahma SoCs Tested-by: Ahmad Fatoum # i.MX6Q Reported-by: Russell King - ARM Linux Signed-off-by: Ahmad Fatoum Signed-off-by: Abbott Liu Signed-off-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/boot/compressed/string.c | 19 +++++++++++++++++++ arch/arm/include/asm/string.h | 26 ++++++++++++++++++++++++++ arch/arm/kernel/head-common.S | 4 ++-- arch/arm/lib/memcpy.S | 3 +++ arch/arm/lib/memmove.S | 5 ++++- arch/arm/lib/memset.S | 3 +++ 6 files changed, 57 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/compressed/string.c b/arch/arm/boot/compressed/string.c index ade5079bebbf..8c0fa276d994 100644 --- a/arch/arm/boot/compressed/string.c +++ b/arch/arm/boot/compressed/string.c @@ -7,6 +7,25 @@ #include +/* + * The decompressor is built without KASan but uses the same redirects as the + * rest of the kernel when CONFIG_KASAN is enabled, defining e.g. memcpy() + * to __memcpy() but since we are not linking with the main kernel string + * library in the decompressor, that will lead to link failures. + * + * Undefine KASan's versions, define the wrapped functions and alias them to + * the right names so that when e.g. __memcpy() appear in the code, it will + * still be linked to this local version of memcpy(). + */ +#ifdef CONFIG_KASAN +#undef memcpy +#undef memmove +#undef memset +void *__memcpy(void *__dest, __const void *__src, size_t __n) __alias(memcpy); +void *__memmove(void *__dest, __const void *__src, size_t count) __alias(memmove); +void *__memset(void *s, int c, size_t count) __alias(memset); +#endif + void *memcpy(void *__dest, __const void *__src, size_t __n) { int i = 0; diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h index 111a1d8a41dd..6c607c68f3ad 100644 --- a/arch/arm/include/asm/string.h +++ b/arch/arm/include/asm/string.h @@ -5,6 +5,9 @@ /* * We don't do inline string functions, since the * optimised inline asm versions are not small. + * + * The __underscore versions of some functions are for KASan to be able + * to replace them with instrumented versions.
*/ #define __HAVE_ARCH_STRRCHR @@ -15,15 +18,18 @@ extern char * strchr(const char * s, int c); #define __HAVE_ARCH_MEMCPY extern void * memcpy(void *, const void *, __kernel_size_t); +extern void *__memcpy(void *dest, const void *src, __kernel_size_t n); #define __HAVE_ARCH_MEMMOVE extern void * memmove(void *, const void *, __kernel_size_t); +extern void *__memmove(void *dest, const void *src, __kernel_size_t n); #define __HAVE_ARCH_MEMCHR extern void * memchr(const void *, int, __kernel_size_t); #define __HAVE_ARCH_MEMSET extern void * memset(void *, int, __kernel_size_t); +extern void *__memset(void *s, int c, __kernel_size_t n); #define __HAVE_ARCH_MEMSET32 extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t); @@ -39,4 +45,24 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) return __memset64(p, v, n * 8, v >> 32); } +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * must use non-instrumented versions of the mem* + * functions named __memcpy() etc. All such kernel code has + * been tagged with KASAN_SANITIZE_file.o = n, which means + * that the address sanitization argument isn't passed to the + * compiler, and __SANITIZE_ADDRESS__ is not set. As a result + * these defines kick in. + */ +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + +#endif + #endif diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 4a3982812a40..6840c7c60a85 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -95,7 +95,7 @@ __mmap_switched: THUMB( ldmia r4!, {r0, r1, r2, r3} ) THUMB( mov sp, r3 ) sub r2, r2, r1 - bl memcpy @ copy .data to RAM + bl __memcpy @ copy .data to RAM #endif ARM( ldmia r4!, {r0, r1, sp} ) @@ -103,7 +103,7 @@ __mmap_switched: THUMB( mov sp, r3 ) sub r2, r1, r0 mov r1, #0 - bl memset @ clear .bss + bl __memset @ clear .bss ldmia r4, {r0, r1, r2, r3} str r9, [r0] @ Save processor ID diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S index 09a333153dc6..ad4625d16e11 100644 --- a/arch/arm/lib/memcpy.S +++ b/arch/arm/lib/memcpy.S @@ -58,6 +58,8 @@ /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ +.weak memcpy +ENTRY(__memcpy) ENTRY(mmiocpy) ENTRY(memcpy) @@ -65,3 +67,4 @@ ENTRY(memcpy) ENDPROC(memcpy) ENDPROC(mmiocpy) +ENDPROC(__memcpy) diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S index b50e5770fb44..fd123ea5a5a4 100644 --- a/arch/arm/lib/memmove.S +++ b/arch/arm/lib/memmove.S @@ -24,12 +24,14 @@ * occurring in the opposite direction. 
*/ +.weak memmove +ENTRY(__memmove) ENTRY(memmove) UNWIND( .fnstart ) subs ip, r0, r1 cmphi r2, ip - bls memcpy + bls __memcpy stmfd sp!, {r0, r4, lr} UNWIND( .fnend ) @@ -222,3 +224,4 @@ ENTRY(memmove) 18: backward_copy_shift push=24 pull=8 ENDPROC(memmove) +ENDPROC(__memmove) diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index 6ca4535c47fb..0e7ff0423f50 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -13,6 +13,8 @@ .text .align 5 +.weak memset +ENTRY(__memset) ENTRY(mmioset) ENTRY(memset) UNWIND( .fnstart ) @@ -132,6 +134,7 @@ UNWIND( .fnstart ) UNWIND( .fnend ) ENDPROC(memset) ENDPROC(mmioset) +ENDPROC(__memset) ENTRY(__memset32) UNWIND( .fnstart ) -- cgit v1.2.3 From c12366ba441da2f6f2b915410aca2b5b39c16514 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 25 Oct 2020 23:53:46 +0100 Subject: ARM: 9015/2: Define the virtual space of KASan's shadow region Define KASAN_SHADOW_OFFSET, KASAN_SHADOW_START and KASAN_SHADOW_END for the Arm kernel address sanitizer. We are "stealing" lowmem (the 4GB addressable by a 32bit architecture) out of the virtual address space to use as shadow memory for KASan as follows:

 +----+ 0xffffffff
 |    | \
 |    |  |-> Static kernel image (vmlinux) BSS and page table
 |    | /
 +----+ PAGE_OFFSET
 |    | \
 |    |  |-> Loadable kernel modules virtual address space area
 |    | /
 +----+ MODULES_VADDR = KASAN_SHADOW_END
 |    | \
 |    |  |-> The shadow area of kernel virtual address.
 |    | /
 +----+-> TASK_SIZE (start of kernel space) = KASAN_SHADOW_START, the
 |    | \   shadow address of MODULES_VADDR
 |    |  |
 |    |  |
 |    |  |-> The user space area in lowmem. The kernel address
 |    |  |   sanitizer does not use this space, nor does it map it.
 |    |  |
 |    |  |
 |    |  |
 |    |  |
 |    | /
 ------ 0

0 .. TASK_SIZE is the memory that can be used by shared userspace/kernelspace. It is used for userspace processes and for passing parameters and memory buffers in system calls etc. We do not need to shadow this area.

KASAN_SHADOW_START: This value begins with MODULES_VADDR's shadow address. It is the start of kernel virtual space. Since we have modules to load, we need to cover also that area with shadow memory so we can find memory bugs in modules.

KASAN_SHADOW_END: This value is the 0x100000000's shadow address: the mapping that would be after the end of the kernel memory at 0xffffffff. It is the end of the kernel address sanitizer shadow area. It is also the start of the module area.

KASAN_SHADOW_OFFSET: This value is used to map an address to the corresponding shadow address by the following formula:

  shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;

As you would expect, >> 3 is equal to dividing by 8, meaning each byte in the shadow memory covers 8 bytes of kernel memory, so one bit of shadow memory per byte of kernel memory is used. The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending on the VMSPLIT layout of the system: the kernel and userspace can split up lowmem in different ways according to needs, so we calculate the shadow offset depending on this.

When KASan is enabled, the definition of TASK_SIZE is not an 8-bit rotated constant, so we need to modify the TASK_SIZE access code in the *.S files.

The kernel and modules may use different amounts of memory, according to the VMSPLIT configuration, which in turn determines the PAGE_OFFSET.
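For reference, the generic helper that implements this formula in C, essentially as found in include/linux/kasan.h (with KASAN_SHADOW_SCALE_SHIFT = 3 on ARM):

  static inline void *kasan_mem_to_shadow(const void *addr)
  {
          return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                  + KASAN_SHADOW_OFFSET;
  }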
We use the following KASAN_SHADOW_OFFSETs depending on how the virtual memory is split up:

- 0x1f000000 if we have 1G userspace / 3G kernelspace split:
  - The kernel address space is 3G (0xc0000000)
  - PAGE_OFFSET is then set to 0x40000000 so the kernel static image (vmlinux) uses addresses 0x40000000 .. 0xffffffff
  - On top of that we have the MODULES_VADDR which under the worst case (using ARM instructions) is PAGE_OFFSET - 16M (0x01000000) = 0x3f000000 so the modules use addresses 0x3f000000 .. 0x3fffffff
  - So the addresses 0x3f000000 .. 0xffffffff need to be covered with shadow memory. That is 0xc1000000 bytes of memory.
  - 1/8 of that is needed for its shadow memory, so 0x18200000 bytes of shadow memory is needed. We "steal" that from the remaining lowmem.
  - The KASAN_SHADOW_START becomes 0x26e00000, to KASAN_SHADOW_END at 0x3effffff.
  - Now we can calculate the KASAN_SHADOW_OFFSET for any kernel address as 0x3f000000 needs to map to the first byte of shadow memory and 0xffffffff needs to map to the last byte of shadow memory.
    Since:
    SHADOW_ADDR = (address >> 3) + KASAN_SHADOW_OFFSET
    0x26e00000 = (0x3f000000 >> 3) + KASAN_SHADOW_OFFSET
    KASAN_SHADOW_OFFSET = 0x26e00000 - (0x3f000000 >> 3)
    KASAN_SHADOW_OFFSET = 0x26e00000 - 0x07e00000
    KASAN_SHADOW_OFFSET = 0x1f000000

- 0x5f000000 if we have 2G userspace / 2G kernelspace split:
  - The kernel space is 2G (0x80000000)
  - PAGE_OFFSET is set to 0x80000000 so the kernel static image uses 0x80000000 .. 0xffffffff.
  - On top of that we have the MODULES_VADDR which under the worst case (using ARM instructions) is PAGE_OFFSET - 16M (0x01000000) = 0x7f000000 so the modules use addresses 0x7f000000 .. 0x7fffffff
  - So the addresses 0x7f000000 .. 0xffffffff need to be covered with shadow memory. That is 0x81000000 bytes of memory.
  - 1/8 of that is needed for its shadow memory, so 0x10200000 bytes of shadow memory is needed. We "steal" that from the remaining lowmem.
  - The KASAN_SHADOW_START becomes 0x6ee00000, to KASAN_SHADOW_END at 0x7effffff.
  - Now we can calculate the KASAN_SHADOW_OFFSET for any kernel address as 0x7f000000 needs to map to the first byte of shadow memory and 0xffffffff needs to map to the last byte of shadow memory.
    Since:
    SHADOW_ADDR = (address >> 3) + KASAN_SHADOW_OFFSET
    0x6ee00000 = (0x7f000000 >> 3) + KASAN_SHADOW_OFFSET
    KASAN_SHADOW_OFFSET = 0x6ee00000 - (0x7f000000 >> 3)
    KASAN_SHADOW_OFFSET = 0x6ee00000 - 0x0fe00000
    KASAN_SHADOW_OFFSET = 0x5f000000

- 0x9f000000 if we have 3G userspace / 1G kernelspace split, and this is the default split for ARM:
  - The kernel address space is 1GB (0x40000000)
  - PAGE_OFFSET is set to 0xc0000000 so the kernel static image uses 0xc0000000 .. 0xffffffff.
  - On top of that we have the MODULES_VADDR which under the worst case (using ARM instructions) is PAGE_OFFSET - 16M (0x01000000) = 0xbf000000 so the modules use addresses 0xbf000000 .. 0xbfffffff
  - So the addresses 0xbf000000 .. 0xffffffff need to be covered with shadow memory. That is 0x41000000 bytes of memory.
  - 1/8 of that is needed for its shadow memory, so 0x08200000 bytes of shadow memory is needed. We "steal" that from the remaining lowmem.
  - The KASAN_SHADOW_START becomes 0xb6e00000, to KASAN_SHADOW_END at 0xbeffffff.
  - Now we can calculate the KASAN_SHADOW_OFFSET for any kernel address as 0xbf000000 needs to map to the first byte of shadow memory and 0xffffffff needs to map to the last byte of shadow memory.
    Since:
    SHADOW_ADDR = (address >> 3) + KASAN_SHADOW_OFFSET
    0xb6e00000 = (0xbf000000 >> 3) + KASAN_SHADOW_OFFSET
    KASAN_SHADOW_OFFSET = 0xb6e00000 - (0xbf000000 >> 3)
    KASAN_SHADOW_OFFSET = 0xb6e00000 - 0x17e00000
    KASAN_SHADOW_OFFSET = 0x9f000000

- 0x8f000000 if we have 3G userspace / 1G kernelspace with full 1 GB low memory (VMSPLIT_3G_OPT):
  - The kernel address space is 1GB (0x40000000)
  - PAGE_OFFSET is set to 0xb0000000 so the kernel static image uses 0xb0000000 .. 0xffffffff.
  - On top of that we have the MODULES_VADDR which under the worst case (using ARM instructions) is PAGE_OFFSET - 16M (0x01000000) = 0xaf000000 so the modules use addresses 0xaf000000 .. 0xafffffff
  - So the addresses 0xaf000000 .. 0xffffffff need to be covered with shadow memory. That is 0x51000000 bytes of memory.
  - 1/8 of that is needed for its shadow memory, so 0x0a200000 bytes of shadow memory is needed. We "steal" that from the remaining lowmem.
  - The KASAN_SHADOW_START becomes 0xa4e00000, to KASAN_SHADOW_END at 0xaeffffff.
  - Now we can calculate the KASAN_SHADOW_OFFSET for any kernel address as 0xaf000000 needs to map to the first byte of shadow memory and 0xffffffff needs to map to the last byte of shadow memory.
    Since:
    SHADOW_ADDR = (address >> 3) + KASAN_SHADOW_OFFSET
    0xa4e00000 = (0xaf000000 >> 3) + KASAN_SHADOW_OFFSET
    KASAN_SHADOW_OFFSET = 0xa4e00000 - (0xaf000000 >> 3)
    KASAN_SHADOW_OFFSET = 0xa4e00000 - 0x15e00000
    KASAN_SHADOW_OFFSET = 0x8f000000

- The default value of 0xffffffff for KASAN_SHADOW_OFFSET is an error value. We should always match one of the above shadow offsets.

When we do this, TASK_SIZE will sometimes take on odd values that will not fit into an immediate mov assembly instruction. To account for this, we need to rewrite some assembly using TASK_SIZE like this:

- mov r1, #TASK_SIZE
+ ldr r1, =TASK_SIZE

or

- cmp r4, #TASK_SIZE
+ ldr r0, =TASK_SIZE
+ cmp r4, r0

This is done to avoid the immediate #TASK_SIZE, which needs to fit into a limited number of bits.
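The four offsets can be checked mechanically; a small self-contained sketch in plain C (KASAN_SHADOW_START is the shadow of MODULES_VADDR, per the formula above):

  #define SHADOW_START(modules_vaddr, offset) (((modules_vaddr) >> 3) + (offset))

  _Static_assert(SHADOW_START(0x3f000000UL, 0x1f000000UL) == 0x26e00000UL, "1G/3G split");
  _Static_assert(SHADOW_START(0x7f000000UL, 0x5f000000UL) == 0x6ee00000UL, "2G/2G split");
  _Static_assert(SHADOW_START(0xbf000000UL, 0x9f000000UL) == 0xb6e00000UL, "3G/1G split");
  _Static_assert(SHADOW_START(0xaf000000UL, 0x8f000000UL) == 0xa4e00000UL, "VMSPLIT_3G_OPT");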
Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: kasan-dev@googlegroups.com Cc: Mike Rapoport Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel # QEMU/KVM/mach-virt/LPAE/8G Tested-by: Florian Fainelli # Brahma SoCs Tested-by: Ahmad Fatoum # i.MX6Q Reported-by: Ard Biesheuvel Signed-off-by: Abbott Liu Signed-off-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/Kconfig | 9 +++++ arch/arm/include/asm/kasan_def.h | 81 ++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/memory.h | 5 +++ arch/arm/include/asm/uaccess-asm.h | 2 +- arch/arm/kernel/entry-armv.S | 3 +- arch/arm/kernel/entry-common.S | 9 +++-- arch/arm/mm/mmu.c | 18 +++++++++ 7 files changed, 122 insertions(+), 5 deletions(-) create mode 100644 arch/arm/include/asm/kasan_def.h (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index fe2f17eb2b50..194032568b7a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1323,6 +1323,15 @@ config PAGE_OFFSET default 0xB0000000 if VMSPLIT_3G_OPT default 0xC0000000 +config KASAN_SHADOW_OFFSET + hex + depends on KASAN + default 0x1f000000 if PAGE_OFFSET=0x40000000 + default 0x5f000000 if PAGE_OFFSET=0x80000000 + default 0x9f000000 if PAGE_OFFSET=0xC0000000 + default 0x8f000000 if PAGE_OFFSET=0xB0000000 + default 0xffffffff + config NR_CPUS int "Maximum number of CPUs (2-32)" range 2 32 diff --git a/arch/arm/include/asm/kasan_def.h b/arch/arm/include/asm/kasan_def.h new file mode 100644 index 000000000000..5739605aa7cf --- /dev/null +++ b/arch/arm/include/asm/kasan_def.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/arm/include/asm/kasan_def.h + * + * Copyright (c) 2018 Huawei Technologies Co., Ltd. + * + * Author: Abbott Liu + */ + +#ifndef __ASM_KASAN_DEF_H +#define __ASM_KASAN_DEF_H + +#ifdef CONFIG_KASAN + +/* + * Define KASAN_SHADOW_OFFSET,KASAN_SHADOW_START and KASAN_SHADOW_END for + * the Arm kernel address sanitizer. We are "stealing" lowmem (the 4GB + * addressable by a 32bit architecture) out of the virtual address + * space to use as shadow memory for KASan as follows: + * + * +----+ 0xffffffff + * | | \ + * | | |-> Static kernel image (vmlinux) BSS and page table + * | |/ + * +----+ PAGE_OFFSET + * | | \ + * | | |-> Loadable kernel modules virtual address space area + * | |/ + * +----+ MODULES_VADDR = KASAN_SHADOW_END + * | | \ + * | | |-> The shadow area of kernel virtual address. + * | |/ + * +----+-> TASK_SIZE (start of kernel space) = KASAN_SHADOW_START the + * | |\ shadow address of MODULES_VADDR + * | | | + * | | | + * | | |-> The user space area in lowmem. The kernel address + * | | | sanitizer do not use this space, nor does it map it. + * | | | + * | | | + * | | | + * | | | + * | |/ + * ------ 0 + * + * 1) KASAN_SHADOW_START + * This value begins with the MODULE_VADDR's shadow address. It is the + * start of kernel virtual space. Since we have modules to load, we need + * to cover also that area with shadow memory so we can find memory + * bugs in modules. + * + * 2) KASAN_SHADOW_END + * This value is the 0x100000000's shadow address: the mapping that would + * be after the end of the kernel memory at 0xffffffff. It is the end of + * kernel address sanitizer shadow area. It is also the start of the + * module area. 
+ * + * 3) KASAN_SHADOW_OFFSET: + * This value is used to map an address to the corresponding shadow + * address by the following formula: + * + * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET; + * + * As you would expect, >> 3 is equal to dividing by 8, meaning each + * byte in the shadow memory covers 8 bytes of kernel memory, so one + * bit shadow memory per byte of kernel memory is used. + * + * The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending + * on the VMSPLIT layout of the system: the kernel and userspace can + * split up lowmem in different ways according to needs, so we calculate + * the shadow offset depending on this. + */ + +#define KASAN_SHADOW_SCALE_SHIFT 3 +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) +#define KASAN_SHADOW_END ((UL(1) << (32 - KASAN_SHADOW_SCALE_SHIFT)) \ + + KASAN_SHADOW_OFFSET) +#define KASAN_SHADOW_START ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET) + +#endif +#endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index bb79e52aeb90..598dbdca2017 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -18,6 +18,7 @@ #ifdef CONFIG_NEED_MACH_MEMORY_H #include #endif +#include /* PAGE_OFFSET - the virtual address of the start of the kernel image */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) @@ -28,7 +29,11 @@ * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ +#ifndef CONFIG_KASAN #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) +#else +#define TASK_SIZE (KASAN_SHADOW_START) +#endif #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) /* diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h index 907571fd05c6..e6eb7a2aaf1e 100644 --- a/arch/arm/include/asm/uaccess-asm.h +++ b/arch/arm/include/asm/uaccess-asm.h @@ -85,7 +85,7 @@ */ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] - mov \tmp2, #TASK_SIZE + ldr \tmp2, =TASK_SIZE str \tmp2, [\tsk, #TI_ADDR_LIMIT] DACR( mrc p15, 0, \tmp0, c3, c0, 0) DACR( str \tmp0, [sp, #SVC_DACR]) diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 55a47df04773..c4220f51fcf3 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -427,7 +427,8 @@ ENDPROC(__fiq_abt) @ if it was interrupted in a critical region. Here we @ perform a quick test inline since it should be false @ 99.9999% of the time. The rest is done out of line. 
- cmp r4, #TASK_SIZE + ldr r0, =TASK_SIZE + cmp r4, r0 blhs kuser_cmpxchg64_fixup #endif #endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 271cb8a1eba1..fee279e28a72 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -50,7 +50,8 @@ __ret_fast_syscall: UNWIND(.cantunwind ) disable_irq_notrace @ disable interrupts ldr r2, [tsk, #TI_ADDR_LIMIT] - cmp r2, #TASK_SIZE + ldr r1, =TASK_SIZE + cmp r2, r1 blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK @@ -87,7 +88,8 @@ __ret_fast_syscall: #endif disable_irq_notrace @ disable interrupts ldr r2, [tsk, #TI_ADDR_LIMIT] - cmp r2, #TASK_SIZE + ldr r1, =TASK_SIZE + cmp r2, r1 blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK @@ -128,7 +130,8 @@ ret_slow_syscall: disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) ldr r2, [tsk, #TI_ADDR_LIMIT] - cmp r2, #TASK_SIZE + ldr r1, =TASK_SIZE + cmp r2, r1 blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index fa259825310c..c06ebfbc48c4 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -1255,8 +1256,25 @@ static inline void prepare_page_table(void) /* * Clear out all the mappings below the kernel image. */ +#ifdef CONFIG_KASAN + /* + * KASan's shadow memory inserts itself between the TASK_SIZE + * and MODULES_VADDR. Do not clear the KASan shadow memory mappings. + */ + for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE) + pmd_clear(pmd_off_k(addr)); + /* + * Skip over the KASan shadow area. KASAN_SHADOW_END is sometimes + * equal to MODULES_VADDR and then we exit the pmd clearing. If we + * are using a thumb-compiled kernel, there will be 8MB more + * to clear as KASan is always offset to 16 MB below MODULES_VADDR. + */ + for (addr = KASAN_SHADOW_END; addr < MODULES_VADDR; addr += PMD_SIZE) + pmd_clear(pmd_off_k(addr)); +#else for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); +#endif #ifdef CONFIG_XIP_KERNEL /* The XIP kernel is mapped in the module area -- skip over it */ -- cgit v1.2.3 From 5615f69bc2097452ecc954f5264d784e158d6801 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 25 Oct 2020 23:55:16 +0100 Subject: ARM: 9016/2: Initialize the mapping of KASan shadow memory This patch initializes the KASan shadow region's page table and memory. There are two stages to KASan initialization:

1. At the early boot stage the whole shadow region is mapped to just one physical page (kasan_zero_page). This is done by the function kasan_early_init(), which is called by __mmap_switched() (arch/arm/kernel/head-common.S).

2. After the call to paging_init(), we use kasan_zero_page as the zero shadow for some memory that KASan does not need to track, and we allocate a new shadow space for the other memory that KASan needs to track. This is done by the function kasan_init(), which is called by setup_arch().

When using KASan we also need to increase THREAD_SIZE_ORDER from 1 to 2, as the extra calls for shadow memory use quite a bit of stack. As we need to make a temporary copy of the PGD when setting up shadow memory, we create a helpful PGD_SIZE definition for both LPAE and non-LPAE setups.
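For scale, the PGD_SIZE definition added below works out as follows (a sketch assuming 4 KiB pages; the values follow from the pgalloc.h hunk):

  /* LPAE:     PTRS_PER_PGD * sizeof(pgd_t) = 4 entries * 8 bytes = 32 bytes */
  /* non-LPAE: PAGE_SIZE << 2 = 4096 << 2 = 16 KiB, the classic ARM first-level table */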
The KASan core code unconditionally calls pud_populate() so this needs to be changed from BUG() to do {} while (0) when building with KASan enabled. After the initial development by Andrey Ryabinin several modifications have been made to this code:

Abbott Liu:
- Add support for ARM LPAE: if LPAE is enabled, the KASan shadow region's mapping table needs to be copied in the pgd_alloc() function.
- Change kasan_pte_populate(), kasan_pmd_populate(), kasan_pud_populate() and kasan_pgd_populate() from the .meminit.text section to the .init.text section. Reported by Florian Fainelli.

Linus Walleij:
- Drop the custom manipulation of TTBR0 and just use cpu_switch_mm() to switch the pgd table.
- Adapt to handle 4th-level page table folding.
- Rewrite the entire page directory and page entry initialization sequence to be recursive, based on arm64's kasan_init.c.

Ard Biesheuvel:
- Necessary underlying fixes.
- Crucial bug fixes to the memory set-up code.

Co-developed-by: Andrey Ryabinin Co-developed-by: Abbott Liu Co-developed-by: Ard Biesheuvel Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: kasan-dev@googlegroups.com Cc: Mike Rapoport Acked-by: Mike Rapoport Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel # QEMU/KVM/mach-virt/LPAE/8G Tested-by: Florian Fainelli # Brahma SoCs Tested-by: Ahmad Fatoum # i.MX6Q Reported-by: Russell King - ARM Linux Reported-by: Florian Fainelli Signed-off-by: Andrey Ryabinin Signed-off-by: Abbott Liu Signed-off-by: Florian Fainelli Signed-off-by: Ard Biesheuvel Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/kasan.h | 33 +++++ arch/arm/include/asm/pgalloc.h | 8 +- arch/arm/include/asm/thread_info.h | 8 + arch/arm/kernel/head-common.S | 3 + arch/arm/kernel/setup.c | 2 + arch/arm/mm/Makefile | 3 + arch/arm/mm/kasan_init.c | 291 +++++++++++++++++++++++++++++++++++++ arch/arm/mm/pgd.c | 16 +- 8 files changed, 362 insertions(+), 2 deletions(-) create mode 100644 arch/arm/include/asm/kasan.h create mode 100644 arch/arm/mm/kasan_init.c (limited to 'arch') diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h new file mode 100644 index 000000000000..303c35df3135 --- /dev/null +++ b/arch/arm/include/asm/kasan.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/arm/include/asm/kasan.h + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin + * + */ + +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifdef CONFIG_KASAN + +#include + +#define KASAN_SHADOW_SCALE_SHIFT 3 + +/* + * The compiler uses a shadow offset assuming that addresses start + * from 0.
Kernel addresses don't start from 0, so shadow + * for kernel really starts from 'compiler's shadow offset' + + * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT) + */ + +asmlinkage void kasan_early_init(void); +extern void kasan_init(void); + +#else +static inline void kasan_init(void) { } +#endif + +#endif diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 15f4674715f8..fdee1f04f4f3 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -21,6 +21,7 @@ #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) #ifdef CONFIG_ARM_LPAE +#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { @@ -28,14 +29,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) } #else /* !CONFIG_ARM_LPAE */ +#define PGD_SIZE (PAGE_SIZE << 2) /* * Since we have only two-level page tables, these are trivial */ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(mm, pmd) do { } while (0) +#ifdef CONFIG_KASAN +/* The KASan core unconditionally calls pud_populate() on all architectures */ +#define pud_populate(mm,pmd,pte) do { } while (0) +#else #define pud_populate(mm,pmd,pte) BUG() - +#endif #endif /* CONFIG_ARM_LPAE */ extern pgd_t *pgd_alloc(struct mm_struct *mm); diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 536b6b979f63..56fae7861fd3 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -13,7 +13,15 @@ #include #include +#ifdef CONFIG_KASAN +/* + * KASan uses a lot of extra stack space so the thread size order needs to + * be increased. + */ +#define THREAD_SIZE_ORDER 2 +#else #define THREAD_SIZE_ORDER 1 +#endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_START_SP (THREAD_SIZE - 8) diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 6840c7c60a85..89c80154b9ef 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -111,6 +111,9 @@ __mmap_switched: str r8, [r2] @ Save atags pointer cmp r3, #0 strne r10, [r3] @ Save control register values +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif mov lr, #0 b start_kernel ENDPROC(__mmap_switched) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 694aa6b4bd03..f4bd8b654255 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -59,6 +59,7 @@ #include #include #include +#include #include "atags.h" @@ -1145,6 +1146,7 @@ void __init setup_arch(char **cmdline_p) early_ioremap_reset(); paging_init(mdesc); + kasan_init(); request_standard_resources(mdesc); if (mdesc->restart) diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 99699c32d8a5..4536159bc8fa 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -113,3 +113,6 @@ obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o obj-$(CONFIG_CACHE_UNIPHIER) += cache-uniphier.o + +KASAN_SANITIZE_kasan_init.o := n +obj-$(CONFIG_KASAN) += kasan_init.o diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c new file mode 100644 index 000000000000..9c348042a724 --- /dev/null +++ b/arch/arm/mm/kasan_init.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file contains kasan initialization code for ARM. + * + * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
+ * Author: Andrey Ryabinin + * Author: Linus Walleij + */ + +#define pr_fmt(fmt) "kasan: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mm.h" + +static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); + +pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss; + +static __init void *kasan_alloc_block(size_t size) +{ + return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), + MEMBLOCK_ALLOC_KASAN, NUMA_NO_NODE); +} + +static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, + unsigned long end, bool early) +{ + unsigned long next; + pte_t *ptep = pte_offset_kernel(pmdp, addr); + + do { + pte_t entry; + void *p; + + next = addr + PAGE_SIZE; + + if (!early) { + if (!pte_none(READ_ONCE(*ptep))) + continue; + + p = kasan_alloc_block(PAGE_SIZE); + if (!p) { + panic("%s failed to allocate shadow page for address 0x%lx\n", + __func__, addr); + return; + } + memset(p, KASAN_SHADOW_INIT, PAGE_SIZE); + entry = pfn_pte(virt_to_pfn(p), + __pgprot(pgprot_val(PAGE_KERNEL))); + } else if (pte_none(READ_ONCE(*ptep))) { + /* + * The early shadow memory is mapping all KASan + * operations to one and the same page in memory, + * "kasan_early_shadow_page" so that the instrumentation + * will work on a scratch area until we can set up the + * proper KASan shadow memory. + */ + entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page), + __pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN)); + } else { + /* + * Early shadow mappings are PMD_SIZE aligned, so if the + * first entry is already set, they must all be set. + */ + return; + } + + set_pte_at(&init_mm, addr, ptep, entry); + } while (ptep++, addr = next, addr != end); +} + +/* + * The pmd (page middle directory) is only used on LPAE + */ +static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, + unsigned long end, bool early) +{ + unsigned long next; + pmd_t *pmdp = pmd_offset(pudp, addr); + + do { + if (pmd_none(*pmdp)) { + /* + * We attempt to allocate a shadow block for the PMDs + * used by the PTEs for this address if it isn't already + * allocated. + */ + void *p = early ? kasan_early_shadow_pte : + kasan_alloc_block(PAGE_SIZE); + + if (!p) { + panic("%s failed to allocate shadow block for address 0x%lx\n", + __func__, addr); + return; + } + pmd_populate_kernel(&init_mm, pmdp, p); + flush_pmd_entry(pmdp); + } + + next = pmd_addr_end(addr, end); + kasan_pte_populate(pmdp, addr, next, early); + } while (pmdp++, addr = next, addr != end); +} + +static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, + bool early) +{ + unsigned long next; + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + + pgdp = pgd_offset_k(addr); + + do { + /* + * Allocate and populate the shadow block of p4d folded into + * pud folded into pmd if it doesn't already exist + */ + if (!early && pgd_none(*pgdp)) { + void *p = kasan_alloc_block(PAGE_SIZE); + + if (!p) { + panic("%s failed to allocate shadow block for address 0x%lx\n", + __func__, addr); + return; + } + pgd_populate(&init_mm, pgdp, p); + } + + next = pgd_addr_end(addr, end); + /* + * We just immediately jump over the p4d and pud page + * directories since we believe ARM32 will never gain four + * nor five level page tables. 
+ */ + p4dp = p4d_offset(pgdp, addr); + pudp = pud_offset(p4dp, addr); + + kasan_pmd_populate(pudp, addr, next, early); + } while (pgdp++, addr = next, addr != end); +} + +extern struct proc_info_list *lookup_processor_type(unsigned int); + +void __init kasan_early_init(void) +{ + struct proc_info_list *list; + + /* + * locate processor in the list of supported processor + * types. The linker builds this table for us from the + * entries in arch/arm/mm/proc-*.S + */ + list = lookup_processor_type(read_cpuid_id()); + if (list) { +#ifdef MULTI_CPU + processor = *list->proc; +#endif + } + + BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET); + /* + * We walk the page table and set all of the shadow memory to point + * to the scratch page. + */ + kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, true); +} + +static void __init clear_pgds(unsigned long start, + unsigned long end) +{ + for (; start && start < end; start += PMD_SIZE) + pmd_clear(pmd_off_k(start)); +} + +static int __init create_mapping(void *start, void *end) +{ + void *shadow_start, *shadow_end; + + shadow_start = kasan_mem_to_shadow(start); + shadow_end = kasan_mem_to_shadow(end); + + pr_info("Mapping kernel virtual memory block: %px-%px at shadow: %px-%px\n", + start, end, shadow_start, shadow_end); + + kasan_pgd_populate((unsigned long)shadow_start & PAGE_MASK, + PAGE_ALIGN((unsigned long)shadow_end), false); + return 0; +} + +void __init kasan_init(void) +{ + phys_addr_t pa_start, pa_end; + u64 i; + + /* + * We are going to perform proper setup of shadow memory. + * + * At first we should unmap early shadow (clear_pgds() call below). + * However, instrumented code can't execute without shadow memory. + * + * To keep the early shadow memory MMU tables around while setting up + * the proper shadow memory, we copy swapper_pg_dir (the initial page + * table) to tmp_pgd_table and use that to keep the early shadow memory + * mapped until the full shadow setup is finished. Then we swap back + * to the proper swapper_pg_dir. + */ + + memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table)); +#ifdef CONFIG_ARM_LPAE + /* We need to be in the same PGD or this won't work */ + BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) != + pgd_index(KASAN_SHADOW_END)); + memcpy(tmp_pmd_table, + pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)), + sizeof(tmp_pmd_table)); + set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)], + __pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); +#endif + cpu_switch_mm(tmp_pgd_table, &init_mm); + local_flush_tlb_all(); + + clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + + kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), + kasan_mem_to_shadow((void *)-1UL) + 1); + + for_each_mem_range(i, &pa_start, &pa_end) { + void *start = __va(pa_start); + void *end = __va(pa_end); + + /* Do not attempt to shadow highmem */ + if (pa_start >= arm_lowmem_limit) { + pr_info("Skip highmem block at %pa-%pa\n", &pa_start, &pa_end); + continue; + } + if (pa_end > arm_lowmem_limit) { + pr_info("Truncating shadow for memory block at %pa-%pa to lowmem region at %pa\n", + &pa_start, &pa_end, &arm_lowmem_limit); + end = __va(arm_lowmem_limit); + } + if (start >= end) { + pr_info("Skipping invalid memory block %pa-%pa (virtual %p-%p)\n", + &pa_start, &pa_end, start, end); + continue; + } + + create_mapping(start, end); + } + + /* + * 1. The module global variables are in MODULES_VADDR ~ MODULES_END, + * so we need to map this area. + * 2.
PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR + * ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't + * use kasan_populate_zero_shadow. + */ + create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE)); + + /* + * KAsan may reuse the contents of kasan_early_shadow_pte directly, so + * we should make sure that it maps the zero page read-only. + */ + for (i = 0; i < PTRS_PER_PTE; i++) + set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE, + &kasan_early_shadow_pte[i], + pfn_pte(virt_to_pfn(kasan_early_shadow_page), + __pgprot(pgprot_val(PAGE_KERNEL) + | L_PTE_RDONLY))); + + cpu_switch_mm(swapper_pg_dir, &init_mm); + local_flush_tlb_all(); + + memset(kasan_early_shadow_page, 0, PAGE_SIZE); + pr_info("Kernel address sanitizer initialized\n"); + init_task.kasan_depth = 0; +} diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index c5e1b27046a8..f8e9bc58a84f 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -66,7 +66,21 @@ pgd_t *pgd_alloc(struct mm_struct *mm) new_pmd = pmd_alloc(mm, new_pud, 0); if (!new_pmd) goto no_pmd; -#endif +#ifdef CONFIG_KASAN + /* + * Copy PMD table for KASAN shadow mappings. + */ + init_pgd = pgd_offset_k(TASK_SIZE); + init_p4d = p4d_offset(init_pgd, TASK_SIZE); + init_pud = pud_offset(init_p4d, TASK_SIZE); + init_pmd = pmd_offset(init_pud, TASK_SIZE); + new_pmd = pmd_offset(new_pud, TASK_SIZE); + memcpy(new_pmd, init_pmd, + (pmd_index(MODULES_VADDR) - pmd_index(TASK_SIZE)) + * sizeof(pmd_t)); + clean_dcache_area(new_pmd, PTRS_PER_PMD * sizeof(pmd_t)); +#endif /* CONFIG_KASAN */ +#endif /* CONFIG_LPAE */ if (!vectors_high()) { /* -- cgit v1.2.3 From 421015713b306e47af95d4d61cdfbd96d462e4cb Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 25 Oct 2020 23:56:18 +0100 Subject: ARM: 9017/2: Enable KASan for ARM This patch enables the kernel address sanitizer for ARM. XIP_KERNEL has not been tested and is therefore not allowed for now. Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: kasan-dev@googlegroups.com Acked-by: Dmitry Vyukov Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel # QEMU/KVM/mach-virt/LPAE/8G Tested-by: Florian Fainelli # Brahma SoCs Tested-by: Ahmad Fatoum # i.MX6Q Signed-off-by: Abbott Liu Signed-off-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 194032568b7a..2c5eb5c2b310 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -67,6 +67,7 @@ config ARM select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU + select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_SECCOMP select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT -- cgit v1.2.3 From fc2933c133744305236793025b00c2f7d258b687 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 28 Oct 2020 14:20:55 +0100 Subject: ARM: 9020/1: mm: use correct section size macro to describe the FDT virtual address Commit 7a1be318f579 ("ARM: 9012/1: move device tree mapping out of linear region") created a permanent, read-only section mapping of the device tree blob provided by the firmware, and added a set of macros to get the base and size of the virtually mapped FDT based on the physical address.
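(For reference, with short descriptors SECTION_SIZE is 1 << 20, i.e. 1 MiB, while PMD_SIZE is 1 << 21, i.e. 2 MiB, since a PMD entry covers two sections there.)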
However, while the mapping code uses the SECTION_SIZE macro correctly, the macros use PMD_SIZE instead, which means something entirely different on ARM when using short descriptors, and is therefore not the right quantity to use here. So replace PMD_SIZE with SECTION_SIZE. While at it, change the names of the macro and its parameter to clarify that it returns the virtual address of the start of the FDT, based on the physical address in memory. Tested-by: Joel Stanley Tested-by: Marek Szyprowski Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 6 +++--- arch/arm/kernel/setup.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 598dbdca2017..38a163f50130 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -73,8 +73,8 @@ #define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) #define FDT_FIXED_BASE UL(0xff800000) -#define FDT_FIXED_SIZE (2 * PMD_SIZE) -#define FDT_VIRT_ADDR(physaddr) ((void *)(FDT_FIXED_BASE | (physaddr) % PMD_SIZE)) +#define FDT_FIXED_SIZE (2 * SECTION_SIZE) +#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE)) #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) /* @@ -116,7 +116,7 @@ extern unsigned long vectors_base; #define MODULES_VADDR PAGE_OFFSET #define XIP_VIRT_ADDR(physaddr) (physaddr) -#define FDT_VIRT_ADDR(physaddr) ((void *)(physaddr)) +#define FDT_VIRT_BASE(physbase) ((void *)(physbase)) #endif /* !CONFIG_MMU */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index f4bd8b654255..d32f7652a5bf 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -1087,7 +1087,7 @@ void __init setup_arch(char **cmdline_p) void *atags_vaddr = NULL; if (__atags_pointer) - atags_vaddr = FDT_VIRT_ADDR(__atags_pointer); + atags_vaddr = FDT_VIRT_BASE(__atags_pointer); setup_processor()