From 3b1a4a8640876a966ab68ab4f561642e19674671 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:14 -0800 Subject: kasan: group vmalloc code This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. Group all vmalloc-related function declarations in include/linux/kasan.h, and their implementations in mm/kasan/common.c. No functional changes. Link: https://lkml.kernel.org/r/80a6fdd29b039962843bd6cf22ce2643a7c8904e.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Marco Elver Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 30d343b4a40a..59538e795df4 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -75,19 +75,6 @@ struct kasan_cache { int free_meta_offset; }; -/* - * These functions provide a special case to support backing module - * allocations with real shadow memory. With KASAN vmalloc, the special - * case is unnecessary, as the work is handled in the generic case. - */ -#ifndef CONFIG_KASAN_VMALLOC -int kasan_module_alloc(void *addr, size_t size); -void kasan_free_shadow(const struct vm_struct *vm); -#else -static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} -#endif - int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); @@ -156,9 +143,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } -static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} - static inline int kasan_add_zero_shadow(void *start, unsigned long size) { return 0; @@ -211,13 +195,16 @@ static inline void *kasan_reset_tag(const void *addr) #endif /* CONFIG_KASAN_SW_TAGS */ #ifdef CONFIG_KASAN_VMALLOC + int kasan_populate_vmalloc(unsigned long addr, unsigned long size); void kasan_poison_vmalloc(const void *start, unsigned long size); void kasan_unpoison_vmalloc(const void *start, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -#else + +#else /* CONFIG_KASAN_VMALLOC */ + static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size) { @@ -232,7 +219,25 @@ static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end) {} -#endif + +#endif /* CONFIG_KASAN_VMALLOC */ + +#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) + +/* + * These functions provide a special case to support backing module + * allocations with real shadow memory. With KASAN vmalloc, the special + * case is unnecessary, as the work is handled in the generic case. 
+ */ +int kasan_module_alloc(void *addr, size_t size); +void kasan_free_shadow(const struct vm_struct *vm); + +#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ + +static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } +static inline void kasan_free_shadow(const struct vm_struct *vm) {} + +#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ #ifdef CONFIG_KASAN_INLINE void kasan_non_canonical_hook(unsigned long addr); -- cgit v1.2.3 From d5750edf6da759576f91ec2b57d5553985815b40 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:17 -0800 Subject: kasan: shadow declarations only for software modes This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. Group shadow-related KASAN function declarations and only define them for the two existing software modes. No functional changes for software modes. Link: https://lkml.kernel.org/r/35126.1606402815@turing-police Link: https://lore.kernel.org/linux-arm-kernel/24105.1606397102@turing-police/ Link: https://lkml.kernel.org/r/e88d94eff94db883a65dca52e1736d80d28dd9bc.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Signed-off-by: Valdis Kletnieks Reviewed-by: Marco Elver Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon [valdis.kletnieks@vt.edu: fix build issue with asmlinkage] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 48 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 59538e795df4..7828436a3a99 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -11,7 +11,7 @@ struct task_struct; #ifdef CONFIG_KASAN -#include +#include #include /* kasan_data struct is used in KUnit tests for KASAN expected failures */ @@ -20,6 +20,20 @@ struct kunit_kasan_expectation { bool report_found; }; +#endif + +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + +#include + +/* Software KASAN implementations use shadow memory. 
*/ + +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_SHADOW_INIT 0xFF +#else +#define KASAN_SHADOW_INIT 0 +#endif + extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; @@ -35,6 +49,23 @@ static inline void *kasan_mem_to_shadow(const void *addr) + KASAN_SHADOW_OFFSET; } +int kasan_add_zero_shadow(void *start, unsigned long size); +void kasan_remove_zero_shadow(void *start, unsigned long size); + +#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +static inline int kasan_add_zero_shadow(void *start, unsigned long size) +{ + return 0; +} +static inline void kasan_remove_zero_shadow(void *start, + unsigned long size) +{} + +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +#ifdef CONFIG_KASAN + /* Enable reporting bugs after kasan_disable_current() */ extern void kasan_enable_current(void); @@ -75,9 +106,6 @@ struct kasan_cache { int free_meta_offset; }; -int kasan_add_zero_shadow(void *start, unsigned long size); -void kasan_remove_zero_shadow(void *start, unsigned long size); - size_t __ksize(const void *); static inline void kasan_unpoison_slab(const void *ptr) { @@ -143,14 +171,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } -static inline int kasan_add_zero_shadow(void *start, unsigned long size) -{ - return 0; -} -static inline void kasan_remove_zero_shadow(void *start, - unsigned long size) -{} - static inline void kasan_unpoison_slab(const void *ptr) { } static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } @@ -158,8 +178,6 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #ifdef CONFIG_KASAN_GENERIC -#define KASAN_SHADOW_INIT 0 - void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_record_aux_stack(void *ptr); @@ -174,8 +192,6 @@ static inline void kasan_record_aux_stack(void *ptr) {} #ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_SHADOW_INIT 0xFF - void kasan_init_tags(void); void *kasan_reset_tag(const void *addr); -- cgit v1.2.3 From cebd0eb29acdfc2f5e44e5f356ffcd0c44f16b4a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:21 -0800 Subject: kasan: rename (un)poison_shadow to (un)poison_range This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. The new mode won't be using shadow memory. Rename external annotation kasan_unpoison_shadow() to kasan_unpoison_range(), and introduce internal functions (un)poison_range() (without kasan_ prefix). 
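For illustration, the naming split looks roughly like this (a minimal sketch, not the
actual mm/kasan/common.c bodies):

	/* Internal helper: no kasan_ prefix, software (shadow) modes only. */
	static void unpoison_range(const void *address, size_t size)
	{
		/* mark the shadow bytes for [address, address + size) accessible */
	}

	/* External annotation: the kasan_ prefix stays on the public name. */
	void kasan_unpoison_range(const void *address, size_t size)
	{
		unpoison_range(address, size);
	}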
Co-developed-by: Marco Elver Link: https://lkml.kernel.org/r/fccdcaa13dc6b2211bf363d6c6d499279a54fe3a.1606161801.git.andreyknvl@google.com Signed-off-by: Marco Elver Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 7828436a3a99..9740c06a04a1 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -72,7 +72,7 @@ extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ extern void kasan_disable_current(void); -void kasan_unpoison_shadow(const void *address, size_t size); +void kasan_unpoison_range(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); @@ -109,7 +109,7 @@ struct kasan_cache { size_t __ksize(const void *); static inline void kasan_unpoison_slab(const void *ptr) { - kasan_unpoison_shadow(ptr, __ksize(ptr)); + kasan_unpoison_range(ptr, __ksize(ptr)); } size_t kasan_metadata_size(struct kmem_cache *cache); @@ -118,7 +118,7 @@ void kasan_restore_multi_shot(bool enabled); #else /* CONFIG_KASAN */ -static inline void kasan_unpoison_shadow(const void *address, size_t size) {} +static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} -- cgit v1.2.3 From d73b49365ee65ac48074bdb5aa717bb4644dbbb7 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:56 -0800 Subject: kasan, arm64: only use kasan_depth for software modes This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. Hardware tag-based KASAN won't use kasan_depth. Only define and use it when one of the software KASAN modes are enabled. No functional changes for software modes. 
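To make the motivation concrete: kasan_depth is a per-task nesting counter used to
suppress reports, which only makes sense for the compiler-instrumented software
modes. Roughly (a sketch; the exact bodies and the report-path check live in
mm/kasan/):

	void kasan_disable_current(void)
	{
		current->kasan_depth++;
	}

	void kasan_enable_current(void)
	{
		current->kasan_depth--;
	}

	/* The report path then returns early while current->kasan_depth != 0. */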
Link: https://lkml.kernel.org/r/e16f15aeda90bc7fb4dfc2e243a14b74cc5c8219.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 18 +++++++++--------- include/linux/sched.h | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 9740c06a04a1..b272960e8396 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -52,6 +52,12 @@ static inline void *kasan_mem_to_shadow(const void *addr) int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); +/* Enable reporting bugs after kasan_disable_current() */ +extern void kasan_enable_current(void); + +/* Disable reporting bugs for current task */ +extern void kasan_disable_current(void); + #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ static inline int kasan_add_zero_shadow(void *start, unsigned long size) @@ -62,16 +68,13 @@ static inline void kasan_remove_zero_shadow(void *start, unsigned long size) {} +static inline void kasan_enable_current(void) {} +static inline void kasan_disable_current(void) {} + #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ #ifdef CONFIG_KASAN -/* Enable reporting bugs after kasan_disable_current() */ -extern void kasan_enable_current(void); - -/* Disable reporting bugs for current task */ -extern void kasan_disable_current(void); - void kasan_unpoison_range(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); @@ -122,9 +125,6 @@ static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} -static inline void kasan_enable_current(void) {} -static inline void kasan_disable_current(void) {} - static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} diff --git a/include/linux/sched.h b/include/linux/sched.h index 51d535b69bd6..6e3a5eeec509 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1234,7 +1234,7 @@ struct task_struct { u64 timer_slack_ns; u64 default_timer_slack_ns; -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) unsigned int kasan_depth; #endif -- cgit v1.2.3 From 60a3a5fe950f4e6c02e9fc6676dc96de043ed743 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:01:03 -0800 Subject: kasan, arm64: rename kasan_init_tags and mark as __init Rename kasan_init_tags() to kasan_init_sw_tags() as the upcoming hardware tag-based KASAN mode will have its own initialization routine. Also similarly to kasan_init() mark kasan_init_tags() as __init. 
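As a reminder of what the __init annotation buys (body illustrative; the real
function initializes the software tagging state):

	void __init kasan_init_sw_tags(void)
	{
		/* Runs once from the early arm64 boot path. Being placed in
		 * .init.text, the function is discarded after boot, so it
		 * must never be reachable from runtime code. */
	}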
Link: https://lkml.kernel.org/r/71e52af72a09f4b50c8042f16101c60e50649fbb.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b272960e8396..d7042d129dc1 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -192,7 +192,7 @@ static inline void kasan_record_aux_stack(void *ptr) {} #ifdef CONFIG_KASAN_SW_TAGS -void kasan_init_tags(void); +void __init kasan_init_sw_tags(void); void *kasan_reset_tag(const void *addr); @@ -201,7 +201,7 @@ bool kasan_report(unsigned long addr, size_t size, #else /* CONFIG_KASAN_SW_TAGS */ -static inline void kasan_init_tags(void) { } +static inline void kasan_init_sw_tags(void) { } static inline void *kasan_reset_tag(const void *addr) { -- cgit v1.2.3 From 0fea6e9af889f1a4e072f5de999e07fe6859fc88 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:06 -0800 Subject: kasan, arm64: expand CONFIG_KASAN checks Some #ifdef CONFIG_KASAN checks are only relevant for software KASAN modes (either related to shadow memory or compiler instrumentation). Expand those into CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS. Link: https://lkml.kernel.org/r/e6971e432dbd72bb897ff14134ebb7e169bdcf0c.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan-checks.h | 2 +- include/linux/kasan.h | 7 ++++--- include/linux/moduleloader.h | 3 ++- include/linux/string.h | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index ac6aba632f2d..ca5e89fb10d3 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -9,7 +9,7 @@ * even in compilation units that selectively disable KASAN, but must use KASAN * to validate access to an address. Never use these in header files! 
*/ -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) bool __kasan_check_read(const volatile void *p, unsigned int size); bool __kasan_check_write(const volatile void *p, unsigned int size); #else diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d7042d129dc1..b1381ee6922a 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -238,7 +238,8 @@ static inline void kasan_release_vmalloc(unsigned long start, #endif /* CONFIG_KASAN_VMALLOC */ -#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) +#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ + !defined(CONFIG_KASAN_VMALLOC) /* * These functions provide a special case to support backing module @@ -248,12 +249,12 @@ static inline void kasan_release_vmalloc(unsigned long start, int kasan_module_alloc(void *addr, size_t size); void kasan_free_shadow(const struct vm_struct *vm); -#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ +#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} -#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ +#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ #ifdef CONFIG_KASAN_INLINE void kasan_non_canonical_hook(unsigned long addr); diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 4fa67a8b2265..9e09d11ffe5b 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -96,7 +96,8 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); -#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) +#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ + !defined(CONFIG_KASAN_VMALLOC) #include #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) #else diff --git a/include/linux/string.h b/include/linux/string.h index 1cd63a8a23ab..4fcfb56abcf5 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -267,7 +267,7 @@ void __write_overflow(void) __compiletime_error("detected write beyond size of o #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); -- cgit v1.2.3 From 2e903b91479782b7dedd869603423d77e079d3de Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:10 -0800 Subject: kasan, arm64: implement HW_TAGS runtime Provide implementation of KASAN functions required for the hardware tag-based mode. Those include core functions for memory and pointer tagging (tags_hw.c) and bug reporting (report_tags_hw.c). Also adapt common KASAN code to support the new mode. 
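The split between the two init hooks matters because MTE state has to be set up on
each CPU, while the global setup runs once. Hypothetical call sites, purely for
illustration (the function names below are not from this patch):

	void __init boot_cpu_setup(void)
	{
		kasan_init_hw_tags();		/* once, on the boot CPU */
	}

	void secondary_cpu_up(unsigned int cpu)
	{
		kasan_init_hw_tags_cpu();	/* on every CPU that comes online */
	}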
Link: https://lkml.kernel.org/r/cfd0fbede579a6b66755c98c88c108e54f9c56bf.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Acked-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 24 +++++++++++++++++------- include/linux/mm.h | 2 +- include/linux/page-flags-layout.h | 2 +- 3 files changed, 19 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b1381ee6922a..d22ec4c9c1bd 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -190,25 +190,35 @@ static inline void kasan_record_aux_stack(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ -#ifdef CONFIG_KASAN_SW_TAGS - -void __init kasan_init_sw_tags(void); +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) void *kasan_reset_tag(const void *addr); bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); -#else /* CONFIG_KASAN_SW_TAGS */ - -static inline void kasan_init_sw_tags(void) { } +#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline void *kasan_reset_tag(const void *addr) { return (void *)addr; } -#endif /* CONFIG_KASAN_SW_TAGS */ +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/ + +#ifdef CONFIG_KASAN_SW_TAGS +void __init kasan_init_sw_tags(void); +#else +static inline void kasan_init_sw_tags(void) { } +#endif + +#ifdef CONFIG_KASAN_HW_TAGS +void kasan_init_hw_tags_cpu(void); +void __init kasan_init_hw_tags(void); +#else +static inline void kasan_init_hw_tags_cpu(void) { } +static inline void kasan_init_hw_tags(void) { } +#endif #ifdef CONFIG_KASAN_VMALLOC diff --git a/include/linux/mm.h b/include/linux/mm.h index 362579ad0758..024ec0a00c72 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1421,7 +1421,7 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) } #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) static inline u8 page_kasan_tag(const struct page *page) { return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index e200eef6a7fd..7d4ec26d8a3e 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -77,7 +77,7 @@ #define LAST_CPUPID_SHIFT 0 #endif -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) #define KASAN_TAG_WIDTH 8 #else #define KASAN_TAG_WIDTH 0 -- cgit v1.2.3 From d56a9ef84bd0e1e8fba7a837ab12a4ec8476579f Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:42 -0800 Subject: kasan, arm64: unpoison stack only with CONFIG_KASAN_STACK There's a config option CONFIG_KASAN_STACK that has to be enabled for KASAN to use stack instrumentation and perform validity checks for stack variables. There's no need to unpoison stack when CONFIG_KASAN_STACK is not enabled. Only call kasan_unpoison_task_stack[_below]() when CONFIG_KASAN_STACK is enabled. Note, that CONFIG_KASAN_STACK is an option that is currently always defined when CONFIG_KASAN is enabled, and therefore has to be tested with #if instead of #ifdef. 
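The #if-versus-#ifdef distinction is easy to trip over, so spelled out as a
standalone preprocessor example (not kernel code):

	#define CONFIG_KASAN_STACK 0	/* always defined, with value 0 or 1 */

	#ifdef CONFIG_KASAN_STACK
	/* reached even when the value is 0 - wrong for this option */
	#endif

	#if CONFIG_KASAN_STACK
	/* reached only when the value is non-zero - what this patch relies on */
	#endif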
Link: https://lkml.kernel.org/r/d09dd3f8abb388da397fd11598c5edeaa83fe559.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/If8a891e9fe01ea543e00b576852685afec0887e3 Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Acked-by: Catalin Marinas Reviewed-by: Dmitry Vyukov Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d22ec4c9c1bd..e638255ce906 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -77,8 +77,6 @@ static inline void kasan_disable_current(void) {} void kasan_unpoison_range(const void *address, size_t size); -void kasan_unpoison_task_stack(struct task_struct *task); - void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); @@ -123,8 +121,6 @@ void kasan_restore_multi_shot(bool enabled); static inline void kasan_unpoison_range(const void *address, size_t size) {} -static inline void kasan_unpoison_task_stack(struct task_struct *task) {} - static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} @@ -176,6 +172,12 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #endif /* CONFIG_KASAN */ +#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +void kasan_unpoison_task_stack(struct task_struct *task); +#else +static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +#endif + #ifdef CONFIG_KASAN_GENERIC void kasan_cache_shrink(struct kmem_cache *cache); -- cgit v1.2.3 From c0054c565ae598073d6c27762c7d4f7de49a45d9 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:52 -0800 Subject: kasan: inline kasan_reset_tag for tag-based modes Using kasan_reset_tag() currently results in a function call. As it's called quite often from the allocator code, this leads to a noticeable slowdown. Move it to include/linux/kasan.h and turn it into a static inline function. Also remove the now unneeded reset_tag() internal KASAN macro and use kasan_reset_tag() instead. 
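For readers unfamiliar with tagged pointers: with arm64 TBI the tag sits in the top
byte of the address, so resetting it is a single OR. A standalone sketch under that
assumption (the kernel instead routes through the arch_kasan_reset_tag() hook shown
in the diff below):

	#include <stdint.h>

	/* Illustrative only: force the match-all tag 0xFF into the top byte. */
	static inline void *reset_tag_sketch(const void *addr)
	{
		return (void *)((uint64_t)addr | (0xffULL << 56));
	}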
Link: https://lkml.kernel.org/r/6940383a3a9dfb416134d338d8fac97a9ebb8686.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/I4d2061acfe91d480a75df00b07c22d8494ef14b5 Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Reviewed-by: Dmitry Vyukov Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index e638255ce906..3bb72de94f90 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -194,7 +194,10 @@ static inline void kasan_record_aux_stack(void *ptr) {} #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) -void *kasan_reset_tag(const void *addr); +static inline void *kasan_reset_tag(const void *addr) +{ + return (void *)arch_kasan_reset_tag(addr); +} bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); -- cgit v1.2.3 From bffe690708c8b4fdb8f0bff8ff22b347fc6c709a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:59 -0800 Subject: kasan: open-code kasan_unpoison_slab There's the external annotation kasan_unpoison_slab() that is currently defined as static inline and uses kasan_unpoison_range(). Open-code this function in mempool.c. Otherwise with an upcoming change this function will result in an unnecessary function call. Link: https://lkml.kernel.org/r/131a6694a978a9a8b150187e539eecc8bcbf759b.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/Ia7c8b659f79209935cbaab3913bf7f082cc43a0e Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 3bb72de94f90..7350de3e9fe4 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -107,11 +107,6 @@ struct kasan_cache { int free_meta_offset; }; -size_t __ksize(const void *); -static inline void kasan_unpoison_slab(const void *ptr) -{ - kasan_unpoison_range(ptr, __ksize(ptr)); -} size_t kasan_metadata_size(struct kmem_cache *cache); bool kasan_save_enable_multi_shot(void); @@ -167,7 +162,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } -static inline void kasan_unpoison_slab(const void *ptr) { } static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #endif /* CONFIG_KASAN */ -- cgit v1.2.3 From 34303244f2615add92076a4bf2d4f39323bde4f2 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:03:10 -0800 Subject: kasan, mm: check kasan_enabled in annotations Declare the kasan_enabled static key in include/linux/kasan.h and in include/linux/mm.h and check it in all kasan annotations. This allows to avoid any slowdown caused by function calls when kasan_enabled is disabled. 
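Reduced to a single annotation, the pattern the diff below applies throughout the
header is:

	void __kasan_poison_slab(struct page *page);

	static __always_inline void kasan_poison_slab(struct page *page)
	{
		if (kasan_enabled())	/* static key: a patched jump, ~free when off */
			__kasan_poison_slab(page);
	}

With CONFIG_KASAN_HW_TAGS, kasan_enabled() expands to
static_branch_likely(&kasan_flag_enabled); for the other modes it is a compile-time
constant, so the compiler folds the check away entirely.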
Link: https://lkml.kernel.org/r/9f90e3c0aa840dbb4833367c2335193299f69023.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/I2589451d3c96c97abbcbf714baabe6161c6f153e Co-developed-by: Vincenzo Frascino Signed-off-by: Vincenzo Frascino Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Reviewed-by: Dmitry Vyukov Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 213 +++++++++++++++++++++++++++++++++++++++----------- include/linux/mm.h | 22 ++++-- 2 files changed, 182 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 7350de3e9fe4..9176849c4934 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -2,6 +2,7 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H +#include #include struct kmem_cache; @@ -75,54 +76,176 @@ static inline void kasan_disable_current(void) {} #ifdef CONFIG_KASAN -void kasan_unpoison_range(const void *address, size_t size); +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; +}; -void kasan_alloc_pages(struct page *page, unsigned int order); -void kasan_free_pages(struct page *page, unsigned int order); +#ifdef CONFIG_KASAN_HW_TAGS +DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); +static __always_inline bool kasan_enabled(void) +{ + return static_branch_likely(&kasan_flag_enabled); +} +#else +static inline bool kasan_enabled(void) +{ + return true; +} +#endif -void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, - slab_flags_t *flags); +void __kasan_unpoison_range(const void *addr, size_t size); +static __always_inline void kasan_unpoison_range(const void *addr, size_t size) +{ + if (kasan_enabled()) + __kasan_unpoison_range(addr, size); +} -void kasan_poison_slab(struct page *page); -void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); -void kasan_poison_object_data(struct kmem_cache *cache, void *object); -void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, - const void *object); +void __kasan_alloc_pages(struct page *page, unsigned int order); +static __always_inline void kasan_alloc_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + __kasan_alloc_pages(page, order); +} -void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, - gfp_t flags); -void kasan_kfree_large(void *ptr, unsigned long ip); -void kasan_poison_kfree(void *ptr, unsigned long ip); -void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object, - size_t size, gfp_t flags); -void * __must_check kasan_krealloc(const void *object, size_t new_size, - gfp_t flags); +void __kasan_free_pages(struct page *page, unsigned int order); +static __always_inline void kasan_free_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + __kasan_free_pages(page, order); +} -void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags); -bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); +void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, + slab_flags_t *flags); +static __always_inline void kasan_cache_create(struct kmem_cache *cache, + unsigned int *size, slab_flags_t *flags) +{ + if (kasan_enabled()) + __kasan_cache_create(cache, size, flags); +} -struct kasan_cache { - int 
alloc_meta_offset; - int free_meta_offset; -}; +size_t __kasan_metadata_size(struct kmem_cache *cache); +static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache) +{ + if (kasan_enabled()) + return __kasan_metadata_size(cache); + return 0; +} + +void __kasan_poison_slab(struct page *page); +static __always_inline void kasan_poison_slab(struct page *page) +{ + if (kasan_enabled()) + __kasan_poison_slab(page); +} + +void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object); +static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache, + void *object) +{ + if (kasan_enabled()) + __kasan_unpoison_object_data(cache, object); +} + +void __kasan_poison_object_data(struct kmem_cache *cache, void *object); +static __always_inline void kasan_poison_object_data(struct kmem_cache *cache, + void *object) +{ + if (kasan_enabled()) + __kasan_poison_object_data(cache, object); +} + +void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, + const void *object); +static __always_inline void * __must_check kasan_init_slab_obj( + struct kmem_cache *cache, const void *object) +{ + if (kasan_enabled()) + return __kasan_init_slab_obj(cache, object); + return (void *)object; +} + +bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); +static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) +{ + if (kasan_enabled()) + return __kasan_slab_free(s, object, ip); + return false; +} + +void * __must_check __kasan_slab_alloc(struct kmem_cache *s, + void *object, gfp_t flags); +static __always_inline void * __must_check kasan_slab_alloc( + struct kmem_cache *s, void *object, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_slab_alloc(s, object, flags); + return object; +} + +void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags); +static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s, + const void *object, size_t size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_kmalloc(s, object, size, flags); + return (void *)object; +} -size_t kasan_metadata_size(struct kmem_cache *cache); +void * __must_check __kasan_kmalloc_large(const void *ptr, + size_t size, gfp_t flags); +static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr, + size_t size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_kmalloc_large(ptr, size, flags); + return (void *)ptr; +} + +void * __must_check __kasan_krealloc(const void *object, + size_t new_size, gfp_t flags); +static __always_inline void * __must_check kasan_krealloc(const void *object, + size_t new_size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_krealloc(object, new_size, flags); + return (void *)object; +} + +void __kasan_poison_kfree(void *ptr, unsigned long ip); +static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + __kasan_poison_kfree(ptr, ip); +} + +void __kasan_kfree_large(void *ptr, unsigned long ip); +static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + __kasan_kfree_large(ptr, ip); +} bool kasan_save_enable_multi_shot(void); void kasan_restore_multi_shot(bool enabled); #else /* CONFIG_KASAN */ +static inline bool kasan_enabled(void) +{ + return false; +} static inline void kasan_unpoison_range(const void *address, size_t size) {} - static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline 
void kasan_free_pages(struct page *page, unsigned int order) {} - static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} - +static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} @@ -133,36 +256,32 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } - -static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) { - return ptr; + return false; +} +static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags) +{ + return object; } -static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} -static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) { return (void *)object; } +static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) +{ + return (void *)ptr; +} static inline void *kasan_krealloc(const void *object, size_t new_size, gfp_t flags) { return (void *)object; } - -static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags) -{ - return object; -} -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip) -{ - return false; -} - -static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } +static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} +static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} #endif /* CONFIG_KASAN */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 024ec0a00c72..5299b90a6c40 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -31,6 +31,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -1422,22 +1423,30 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) #endif /* CONFIG_NUMA_BALANCING */ #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) + static inline u8 page_kasan_tag(const struct page *page) { - return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + if (kasan_enabled()) + return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + return 0xff; } static inline void page_kasan_tag_set(struct page *page, u8 tag) { - page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); - page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; + if (kasan_enabled()) { + page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); + page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; + } } static inline void page_kasan_tag_reset(struct page *page) { - page_kasan_tag_set(page, 0xff); + if (kasan_enabled()) + page_kasan_tag_set(page, 0xff); } -#else + +#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ + static inline u8 page_kasan_tag(const struct page *page) { return 0xff; @@ -1445,7 +1454,8 @@ static inline u8 page_kasan_tag(const struct page *page) static inline void page_kasan_tag_set(struct page *page, u8 tag) { } static inline void page_kasan_tag_reset(struct page *page) { } -#endif + +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline struct zone *page_zone(const struct page *page) { -- cgit v1.2.3 From eeb3160c2419e0f1045537acac7b19cba64112f4 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov 
Date: Tue, 22 Dec 2020 12:03:13 -0800 Subject: kasan, mm: rename kasan_poison_kfree Rename kasan_poison_kfree() to kasan_slab_free_mempool() as it better reflects what this annotation does. Also add a comment that explains the PageSlab() check. No functional changes. Link: https://lkml.kernel.org/r/141675fb493555e984c5dca555e9d9f768c7bbaa.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/I5026f87364e556b506ef1baee725144bb04b8810 Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 9176849c4934..6f0c5d9aa43f 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -176,6 +176,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } +void __kasan_slab_free_mempool(void *ptr, unsigned long ip); +static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + __kasan_slab_free_mempool(ptr, ip); +} + void * __must_check __kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); static __always_inline void * __must_check kasan_slab_alloc( @@ -216,13 +223,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } -void __kasan_poison_kfree(void *ptr, unsigned long ip); -static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip) -{ - if (kasan_enabled()) - __kasan_poison_kfree(ptr, ip); -} - void __kasan_kfree_large(void *ptr, unsigned long ip); static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip) { @@ -261,6 +261,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, { return false; } +static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) { @@ -280,7 +281,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } -static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} #endif /* CONFIG_KASAN */ -- cgit v1.2.3 From e86f8b09f215e3755cd2d56930487dec2de02433 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:03:31 -0800 Subject: kasan, mm: allow cache merging with no metadata The reason cache merging is disabled with KASAN is because KASAN puts its metadata right after the allocated object. When the merged caches have slightly different sizes, the metadata ends up in different places, which KASAN doesn't support. It might be possible to adjust the metadata allocation algorithm and make it friendly to the cache merging code. Instead this change takes a simpler approach and allows merging caches when no metadata is present. Which is the case for hardware tag-based KASAN with kasan.mode=prod. 
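A plausible shape for the new hook, hedged because the predicate below is an
illustrative stand-in rather than the exact mm/kasan/common.c code:

	slab_flags_t __kasan_never_merge(void)
	{
		/* Veto merging only when KASAN attaches metadata to caches;
		 * kasan_has_metadata() is a made-up name for that check. */
		if (kasan_has_metadata())
			return SLAB_KASAN;
		return 0;
	}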
Link: https://lkml.kernel.org/r/37497e940bfd4b32c0a93a702a9ae4cf061d5392.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/Ia114847dfb2244f297d2cb82d592bf6a07455dba Co-developed-by: Vincenzo Frascino Signed-off-by: Vincenzo Frascino Signed-off-by: Andrey Konovalov Reviewed-by: Dmitry Vyukov Reviewed-by: Marco Elver Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6f0c5d9aa43f..5e0655fb2a6f 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -82,17 +82,30 @@ struct kasan_cache { }; #ifdef CONFIG_KASAN_HW_TAGS + DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); + static __always_inline bool kasan_enabled(void) { return static_branch_likely(&kasan_flag_enabled); } -#else + +#else /* CONFIG_KASAN_HW_TAGS */ + static inline bool kasan_enabled(void) { return true; } -#endif + +#endif /* CONFIG_KASAN_HW_TAGS */ + +slab_flags_t __kasan_never_merge(void); +static __always_inline slab_flags_t kasan_never_merge(void) +{ + if (kasan_enabled()) + return __kasan_never_merge(); + return 0; +} void __kasan_unpoison_range(const void *addr, size_t size); static __always_inline void kasan_unpoison_range(const void *addr, size_t size) @@ -239,6 +252,10 @@ static inline bool kasan_enabled(void) { return false; } +static inline slab_flags_t kasan_never_merge(void) +{ + return 0; +} static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} -- cgit v1.2.3
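For context, a reduced sketch of the consumer side in mm/slab_common.c (flag list
abbreviated): a cache is a merge candidate only if none of the "never merge" flags
are set, and kasan_never_merge() now contributes to that mask at runtime:

	#define SLAB_NEVER_MERGE \
		(SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | kasan_never_merge())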