Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig | 2
-rw-r--r-- | mm/filemap.c | 3
-rw-r--r-- | mm/hugetlb.c | 22
-rw-r--r-- | mm/kasan/Makefile | 25
-rw-r--r-- | mm/kasan/common.c | 822
-rw-r--r-- | mm/kasan/generic.c | 74
-rw-r--r-- | mm/kasan/generic_report.c | 165
-rw-r--r-- | mm/kasan/hw_tags.c | 204
-rw-r--r-- | mm/kasan/init.c | 17
-rw-r--r-- | mm/kasan/kasan.h | 173
-rw-r--r-- | mm/kasan/quarantine.c | 31
-rw-r--r-- | mm/kasan/report.c | 317
-rw-r--r-- | mm/kasan/report_generic.c | 327
-rw-r--r-- | mm/kasan/report_hw_tags.c | 42
-rw-r--r-- | mm/kasan/report_sw_tags.c (renamed from mm/kasan/tags_report.c) | 29
-rw-r--r-- | mm/kasan/shadow.c | 504
-rw-r--r-- | mm/kasan/sw_tags.c (renamed from mm/kasan/tags.c) | 39
-rw-r--r-- | mm/memcontrol.c | 51
-rw-r--r-- | mm/memory.c | 8
-rw-r--r-- | mm/memory_hotplug.c | 107
-rw-r--r-- | mm/mempool.c | 4
-rw-r--r-- | mm/mmap.c | 2
-rw-r--r-- | mm/mremap.c | 4
-rw-r--r-- | mm/page-writeback.c | 2
-rw-r--r-- | mm/page_alloc.c | 17
-rw-r--r-- | mm/page_poison.c | 2
-rw-r--r-- | mm/ptdump.c | 13
-rw-r--r-- | mm/slab_common.c | 5
-rw-r--r-- | mm/slub.c | 34
-rw-r--r-- | mm/util.c | 12 |
30 files changed, 1799 insertions, 1258 deletions
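Note on the KASAN hunks below: the rework replaces the shadow-specific helpers (kasan_poison_shadow()/kasan_unpoison_shadow()) with mode-agnostic poison_range()/unpoison_range() moved into the new mm/kasan/shadow.c, and the "shadow scale" terminology becomes "granule" (KASAN_SHADOW_SCALE_SIZE -> KASAN_GRANULE_SIZE). The generic-mode encoding these helpers maintain is unchanged: one shadow byte covers 8 bytes of memory and holds 0 for a fully accessible granule, 1..7 for a partially accessible one, or a poison value. A minimal userspace sketch of that encoding follows; it is illustrative only, not kernel code, and the toy_* names and POISON_REDZONE value are made up for the example.

#include <stdio.h>
#include <string.h>

#define KASAN_GRANULE_SIZE 8   /* bytes of memory tracked per shadow byte */
#define POISON_REDZONE 0xFC    /* arbitrary poison value for this sketch */

/* Toy shadow map: one byte per 8-byte granule of a 64-byte "object". */
static unsigned char shadow[8];

/* Mark 'size' bytes from offset 0 accessible, the rest poisoned. */
static void toy_unpoison(size_t size)
{
	size_t full = size / KASAN_GRANULE_SIZE;

	memset(shadow, 0, full);                        /* fully accessible granules */
	if (size % KASAN_GRANULE_SIZE)
		shadow[full++] = size % KASAN_GRANULE_SIZE; /* partial granule: valid bytes */
	memset(shadow + full, POISON_REDZONE, sizeof(shadow) - full);
}

/* A 1-byte access at 'off' is valid if its granule's shadow allows that offset. */
static int toy_valid(size_t off)
{
	unsigned char s = shadow[off / KASAN_GRANULE_SIZE];

	return s == 0 ||
	       (s < KASAN_GRANULE_SIZE && (off % KASAN_GRANULE_SIZE) < s);
}

int main(void)
{
	toy_unpoison(13); /* 13-byte object, remainder is redzone */
	printf("off 12: %s\n", toy_valid(12) ? "ok" : "bad"); /* ok  */
	printf("off 13: %s\n", toy_valid(13) ? "ok" : "bad"); /* bad */
	printf("off 16: %s\n", toy_valid(16) ? "ok" : "bad"); /* bad */
	return 0;
}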
diff --git a/mm/Kconfig b/mm/Kconfig index 4275c25b5d8a..f730605b8dcf 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -713,7 +713,7 @@ config ZSMALLOC_STAT select DEBUG_FS help This option enables code in the zsmalloc to collect various - statistics about whats happening in zsmalloc and exports that + statistics about what's happening in zsmalloc and exports that information to userspace via debugfs. If unsure, say N. diff --git a/mm/filemap.c b/mm/filemap.c index 7a49bac48aea..5c9d564317a5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2453,6 +2453,9 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb, if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) return 0; + if (unlikely(!iov_iter_count(iter))) + return 0; + iov_iter_truncate(iter, inode->i_sb->s_maxbytes); if (nr_pages > ARRAY_SIZE(pages_onstack)) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index cbf32d2824fd..a2602969873d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4105,10 +4105,30 @@ retry_avoidcopy: * may get SIGKILLed if it later faults. */ if (outside_reserve) { + struct address_space *mapping = vma->vm_file->f_mapping; + pgoff_t idx; + u32 hash; + put_page(old_page); BUG_ON(huge_pte_none(pte)); + /* + * Drop hugetlb_fault_mutex and i_mmap_rwsem before + * unmapping. unmapping needs to hold i_mmap_rwsem + * in write mode. Dropping i_mmap_rwsem in read mode + * here is OK as COW mappings do not interact with + * PMD sharing. + * + * Reacquire both after unmap operation. + */ + idx = vma_hugecache_offset(h, vma, haddr); + hash = hugetlb_fault_mutex_hash(mapping, idx); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + i_mmap_unlock_read(mapping); + unmap_ref_private(mm, vma, old_page, haddr); - BUG_ON(huge_pte_none(pte)); + + i_mmap_lock_read(mapping); + mutex_lock(&hugetlb_fault_mutex_table[hash]); spin_lock(ptl); ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); if (likely(ptep && diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 370d970e5ab5..9fe39a66388a 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -6,12 +6,15 @@ KCOV_INSTRUMENT := n # Disable ftrace to avoid recursion. 
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_quarantine.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_tags_report.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_report_generic.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_report_hw_tags.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_report_sw_tags.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_shadow.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_hw_tags.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_sw_tags.o = $(CC_FLAGS_FTRACE) # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 @@ -22,13 +25,17 @@ CC_FLAGS_KASAN_RUNTIME += -DDISABLE_BRANCH_PROFILING CFLAGS_common.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_generic.o := $(CC_FLAGS_KASAN_RUNTIME) -CFLAGS_generic_report.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_init.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_quarantine.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_report.o := $(CC_FLAGS_KASAN_RUNTIME) -CFLAGS_tags.o := $(CC_FLAGS_KASAN_RUNTIME) -CFLAGS_tags_report.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_report_generic.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_report_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_report_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_shadow.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) -obj-$(CONFIG_KASAN) := common.o init.o report.o -obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o -obj-$(CONFIG_KASAN_SW_TAGS) += tags.o tags_report.o +obj-$(CONFIG_KASAN) := common.o report.o +obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o +obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o +obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 950fd372a07e..b25167664ead 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -1,24 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 /* - * This file contains common generic and tag-based KASAN code. + * This file contains common KASAN code. * * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> * * Some code borrowed from https://github.com/xairy/kasan-prototype by * Andrey Konovalov <andreyknvl@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * */ #include <linux/export.h> #include <linux/init.h> #include <linux/kasan.h> #include <linux/kernel.h> -#include <linux/kmemleak.h> #include <linux/linkage.h> #include <linux/memblock.h> #include <linux/memory.h> @@ -31,12 +25,8 @@ #include <linux/stacktrace.h> #include <linux/string.h> #include <linux/types.h> -#include <linux/vmalloc.h> #include <linux/bug.h> -#include <asm/cacheflush.h> -#include <asm/tlbflush.h> - #include "kasan.h" #include "../slab.h" @@ -56,6 +46,7 @@ void kasan_set_track(struct kasan_track *track, gfp_t flags) track->stack = kasan_save_stack(flags); } +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) void kasan_enable_current(void) { current->kasan_depth++; @@ -65,106 +56,20 @@ void kasan_disable_current(void) { current->kasan_depth--; } +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -bool __kasan_check_read(const volatile void *p, unsigned int size) -{ - return check_memory_region((unsigned long)p, size, false, _RET_IP_); -} -EXPORT_SYMBOL(__kasan_check_read); - -bool __kasan_check_write(const volatile void *p, unsigned int size) -{ - return check_memory_region((unsigned long)p, size, true, _RET_IP_); -} -EXPORT_SYMBOL(__kasan_check_write); - -#undef memset -void *memset(void *addr, int c, size_t len) -{ - if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_)) - return NULL; - - return __memset(addr, c, len); -} - -#ifdef __HAVE_ARCH_MEMMOVE -#undef memmove -void *memmove(void *dest, const void *src, size_t len) -{ - if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) || - !check_memory_region((unsigned long)dest, len, true, _RET_IP_)) - return NULL; - - return __memmove(dest, src, len); -} -#endif - -#undef memcpy -void *memcpy(void *dest, const void *src, size_t len) +void __kasan_unpoison_range(const void *address, size_t size) { - if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) || - !check_memory_region((unsigned long)dest, len, true, _RET_IP_)) - return NULL; - - return __memcpy(dest, src, len); -} - -/* - * Poisons the shadow memory for 'size' bytes starting from 'addr'. - * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. - */ -void kasan_poison_shadow(const void *address, size_t size, u8 value) -{ - void *shadow_start, *shadow_end; - - /* - * Perform shadow offset calculation based on untagged address, as - * some of the callers (e.g. kasan_poison_object_data) pass tagged - * addresses to this function. - */ - address = reset_tag(address); - - shadow_start = kasan_mem_to_shadow(address); - shadow_end = kasan_mem_to_shadow(address + size); - - __memset(shadow_start, value, shadow_end - shadow_start); -} - -void kasan_unpoison_shadow(const void *address, size_t size) -{ - u8 tag = get_tag(address); - - /* - * Perform shadow offset calculation based on untagged address, as - * some of the callers (e.g. kasan_unpoison_object_data) pass tagged - * addresses to this function. - */ - address = reset_tag(address); - - kasan_poison_shadow(address, size, tag); - - if (size & KASAN_SHADOW_MASK) { - u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); - - if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - *shadow = tag; - else - *shadow = size & KASAN_SHADOW_MASK; - } -} - -static void __kasan_unpoison_stack(struct task_struct *task, const void *sp) -{ - void *base = task_stack_page(task); - size_t size = sp - base; - - kasan_unpoison_shadow(base, size); + unpoison_range(address, size); } +#if CONFIG_KASAN_STACK /* Unpoison the entire stack for a task. 
*/ void kasan_unpoison_task_stack(struct task_struct *task) { - __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE); + void *base = task_stack_page(task); + + unpoison_range(base, THREAD_SIZE); } /* Unpoison the stack for the current task beyond a watermark sp value. */ @@ -177,10 +82,22 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) */ void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1)); - kasan_unpoison_shadow(base, watermark - base); + unpoison_range(base, watermark - base); +} +#endif /* CONFIG_KASAN_STACK */ + +/* + * Only allow cache merging when stack collection is disabled and no metadata + * is present. + */ +slab_flags_t __kasan_never_merge(void) +{ + if (kasan_stack_collection_enabled()) + return SLAB_KASAN; + return 0; } -void kasan_alloc_pages(struct page *page, unsigned int order) +void __kasan_alloc_pages(struct page *page, unsigned int order) { u8 tag; unsigned long i; @@ -191,13 +108,13 @@ void kasan_alloc_pages(struct page *page, unsigned int order) tag = random_tag(); for (i = 0; i < (1 << order); i++) page_kasan_tag_set(page + i, tag); - kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); + unpoison_range(page_address(page), PAGE_SIZE << order); } -void kasan_free_pages(struct page *page, unsigned int order) +void __kasan_free_pages(struct page *page, unsigned int order) { if (likely(!PageHighMem(page))) - kasan_poison_shadow(page_address(page), + poison_range(page_address(page), PAGE_SIZE << order, KASAN_FREE_PAGE); } @@ -208,9 +125,6 @@ void kasan_free_pages(struct page *page, unsigned int order) */ static inline unsigned int optimal_redzone(unsigned int object_size) { - if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - return 0; - return object_size <= 64 - 16 ? 16 : object_size <= 128 - 32 ? 32 : @@ -221,88 +135,129 @@ static inline unsigned int optimal_redzone(unsigned int object_size) object_size <= (1 << 16) - 1024 ? 1024 : 2048; } -void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, - slab_flags_t *flags) +void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, + slab_flags_t *flags) { - unsigned int orig_size = *size; - unsigned int redzone_size; - int redzone_adjust; + unsigned int ok_size; + unsigned int optimal_size; - /* Add alloc meta. */ - cache->kasan_info.alloc_meta_offset = *size; - *size += sizeof(struct kasan_alloc_meta); + /* + * SLAB_KASAN is used to mark caches as ones that are sanitized by + * KASAN. Currently this flag is used in two places: + * 1. In slab_ksize() when calculating the size of the accessible + * memory within the object. + * 2. In slab_common.c to prevent merging of sanitized caches. + */ + *flags |= SLAB_KASAN; - /* Add free meta. */ - if (IS_ENABLED(CONFIG_KASAN_GENERIC) && - (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || - cache->object_size < sizeof(struct kasan_free_meta))) { - cache->kasan_info.free_meta_offset = *size; - *size += sizeof(struct kasan_free_meta); - } + if (!kasan_stack_collection_enabled()) + return; - redzone_size = optimal_redzone(cache->object_size); - redzone_adjust = redzone_size - (*size - cache->object_size); - if (redzone_adjust > 0) - *size += redzone_adjust; + ok_size = *size; - *size = min_t(unsigned int, KMALLOC_MAX_SIZE, - max(*size, cache->object_size + redzone_size)); + /* Add alloc meta into redzone. */ + cache->kasan_info.alloc_meta_offset = *size; + *size += sizeof(struct kasan_alloc_meta); /* - * If the metadata doesn't fit, don't enable KASAN at all. 
+ * If alloc meta doesn't fit, don't add it. + * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal + * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for + * larger sizes. */ - if (*size <= cache->kasan_info.alloc_meta_offset || - *size <= cache->kasan_info.free_meta_offset) { + if (*size > KMALLOC_MAX_SIZE) { cache->kasan_info.alloc_meta_offset = 0; - cache->kasan_info.free_meta_offset = 0; - *size = orig_size; + *size = ok_size; + /* Continue, since free meta might still fit. */ + } + + /* Only the generic mode uses free meta or flexible redzones. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META; return; } - *flags |= SLAB_KASAN; + /* + * Add free meta into redzone when it's not possible to store + * it in the object. This is the case when: + * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can + * be touched after it was freed, or + * 2. Object has a constructor, which means it's expected to + * retain its content until the next allocation, or + * 3. Object is too small. + * Otherwise cache->kasan_info.free_meta_offset = 0 is implied. + */ + if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor || + cache->object_size < sizeof(struct kasan_free_meta)) { + ok_size = *size; + + cache->kasan_info.free_meta_offset = *size; + *size += sizeof(struct kasan_free_meta); + + /* If free meta doesn't fit, don't add it. */ + if (*size > KMALLOC_MAX_SIZE) { + cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META; + *size = ok_size; + } + } + + /* Calculate size with optimal redzone. */ + optimal_size = cache->object_size + optimal_redzone(cache->object_size); + /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */ + if (optimal_size > KMALLOC_MAX_SIZE) + optimal_size = KMALLOC_MAX_SIZE; + /* Use optimal size if the size with added metas is not large enough. */ + if (*size < optimal_size) + *size = optimal_size; } -size_t kasan_metadata_size(struct kmem_cache *cache) +size_t __kasan_metadata_size(struct kmem_cache *cache) { + if (!kasan_stack_collection_enabled()) + return 0; return (cache->kasan_info.alloc_meta_offset ? sizeof(struct kasan_alloc_meta) : 0) + (cache->kasan_info.free_meta_offset ? 
sizeof(struct kasan_free_meta) : 0); } -struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, - const void *object) +struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache, + const void *object) { - return (void *)object + cache->kasan_info.alloc_meta_offset; + if (!cache->kasan_info.alloc_meta_offset) + return NULL; + return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset; } -struct kasan_free_meta *get_free_info(struct kmem_cache *cache, - const void *object) +#ifdef CONFIG_KASAN_GENERIC +struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, + const void *object) { BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32); - return (void *)object + cache->kasan_info.free_meta_offset; + if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META) + return NULL; + return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset; } +#endif -void kasan_poison_slab(struct page *page) +void __kasan_poison_slab(struct page *page) { unsigned long i; for (i = 0; i < compound_nr(page); i++) page_kasan_tag_reset(page + i); - kasan_poison_shadow(page_address(page), page_size(page), - KASAN_KMALLOC_REDZONE); + poison_range(page_address(page), page_size(page), + KASAN_KMALLOC_REDZONE); } -void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) +void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object) { - kasan_unpoison_shadow(object, cache->object_size); + unpoison_range(object, cache->object_size); } -void kasan_poison_object_data(struct kmem_cache *cache, void *object) +void __kasan_poison_object_data(struct kmem_cache *cache, void *object) { - kasan_poison_shadow(object, - round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), - KASAN_KMALLOC_REDZONE); + poison_range(object, cache->object_size, KASAN_KMALLOC_REDZONE); } /* @@ -322,6 +277,9 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init, bool keep_tag) { + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) + return 0xff; + /* * 1. When an object is kmalloc()'ed, two hooks are called: * kasan_slab_alloc() and kasan_kmalloc(). 
We assign the @@ -351,50 +309,32 @@ static u8 assign_tag(struct kmem_cache *cache, const void *object, #endif } -void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, +void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, const void *object) { - struct kasan_alloc_meta *alloc_info; + struct kasan_alloc_meta *alloc_meta; - if (!(cache->flags & SLAB_KASAN)) - return (void *)object; - - alloc_info = get_alloc_info(cache, object); - __memset(alloc_info, 0, sizeof(*alloc_info)); + if (kasan_stack_collection_enabled()) { + alloc_meta = kasan_get_alloc_meta(cache, object); + if (alloc_meta) + __memset(alloc_meta, 0, sizeof(*alloc_meta)); + } - if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - object = set_tag(object, - assign_tag(cache, object, true, false)); + /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */ + object = set_tag(object, assign_tag(cache, object, true, false)); return (void *)object; } -static inline bool shadow_invalid(u8 tag, s8 shadow_byte) -{ - if (IS_ENABLED(CONFIG_KASAN_GENERIC)) - return shadow_byte < 0 || - shadow_byte >= KASAN_SHADOW_SCALE_SIZE; - - /* else CONFIG_KASAN_SW_TAGS: */ - if ((u8)shadow_byte == KASAN_TAG_INVALID) - return true; - if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte)) - return true; - - return false; -} - -static bool __kasan_slab_free(struct kmem_cache *cache, void *object, +static bool ____kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool quarantine) { - s8 shadow_byte; u8 tag; void *tagged_object; - unsigned long rounded_up_size; tag = get_tag(object); tagged_object = object; - object = reset_tag(object); + object = kasan_reset_tag(object); if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != object)) { @@ -406,37 +346,67 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object, if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) return false; - shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); - if (shadow_invalid(tag, shadow_byte)) { + if (check_invalid_free(tagged_object)) { kasan_report_invalid_free(tagged_object, ip); return true; } - rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); - kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); + poison_range(object, cache->object_size, KASAN_KMALLOC_FREE); - if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) || - unlikely(!(cache->flags & SLAB_KASAN))) + if (!kasan_stack_collection_enabled()) + return false; + + if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)) return false; kasan_set_free_info(cache, object, tag); - quarantine_put(get_free_info(cache, object), cache); + return quarantine_put(cache, object); +} - return IS_ENABLED(CONFIG_KASAN_GENERIC); +bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) +{ + return ____kasan_slab_free(cache, object, ip, true); } -bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) +void __kasan_slab_free_mempool(void *ptr, unsigned long ip) { - return __kasan_slab_free(cache, object, ip, true); + struct page *page; + + page = virt_to_head_page(ptr); + + /* + * Even though this function is only called for kmem_cache_alloc and + * kmalloc backed mempool allocations, those allocations can still be + * !PageSlab() when the size provided to kmalloc is larger than + * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. 
+ */ + if (unlikely(!PageSlab(page))) { + if (ptr != page_address(page)) { + kasan_report_invalid_free(ptr, ip); + return; + } + poison_range(ptr, page_size(page), KASAN_FREE_PAGE); + } else { + ____kasan_slab_free(page->slab_cache, ptr, ip, false); + } +} + +static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) +{ + struct kasan_alloc_meta *alloc_meta; + + alloc_meta = kasan_get_alloc_meta(cache, object); + if (alloc_meta) + kasan_set_track(&alloc_meta->alloc_track, flags); } -static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, +static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags, bool keep_tag) { unsigned long redzone_start; unsigned long redzone_end; - u8 tag = 0xff; + u8 tag; if (gfpflags_allow_blocking(flags)) quarantine_reduce(); @@ -445,38 +415,36 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, return NULL; redzone_start = round_up((unsigned long)(object + size), - KASAN_SHADOW_SCALE_SIZE); + KASAN_GRANULE_SIZE); redzone_end = round_up((unsigned long)object + cache->object_size, - KASAN_SHADOW_SCALE_SIZE); - - if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - tag = assign_tag(cache, object, false, keep_tag); + KASAN_GRANULE_SIZE); + tag = assign_tag(cache, object, false, keep_tag); - /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ - kasan_unpoison_shadow(set_tag(object, tag), size); - kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, - KASAN_KMALLOC_REDZONE); + /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */ + unpoison_range(set_tag(object, tag), size); + poison_range((void *)redzone_start, redzone_end - redzone_start, + KASAN_KMALLOC_REDZONE); - if (cache->flags & SLAB_KASAN) - kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags); + if (kasan_stack_collection_enabled()) + set_alloc_info(cache, (void *)object, flags); return set_tag(object, tag); } -void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, - gfp_t flags) +void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, + void *object, gfp_t flags) { - return __kasan_kmalloc(cache, object, cache->object_size, flags, false); + return ____kasan_kmalloc(cache, object, cache->object_size, flags, false); } -void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, - size_t size, gfp_t flags) +void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object, + size_t size, gfp_t flags) { - return __kasan_kmalloc(cache, object, size, flags, true); + return ____kasan_kmalloc(cache, object, size, flags, true); } -EXPORT_SYMBOL(kasan_kmalloc); +EXPORT_SYMBOL(__kasan_kmalloc); -void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, +void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) { struct page *page; @@ -491,17 +459,17 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, page = virt_to_page(ptr); redzone_start = round_up((unsigned long)(ptr + size), - KASAN_SHADOW_SCALE_SIZE); + KASAN_GRANULE_SIZE); redzone_end = (unsigned long)ptr + page_size(page); - kasan_unpoison_shadow(ptr, size); - kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, - KASAN_PAGE_REDZONE); + unpoison_range(ptr, size); + poison_range((void *)redzone_start, redzone_end - redzone_start, + KASAN_PAGE_REDZONE); return (void *)ptr; } -void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) +void * __must_check 
__kasan_krealloc(const void *object, size_t size, gfp_t flags) { struct page *page; @@ -511,421 +479,15 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) page = virt_to_head_page(object); if (unlikely(!PageSlab(page))) - return kasan_kmalloc_large(object, size, flags); + return __kasan_kmalloc_large(object, size, flags); else - return __kasan_kmalloc(page->slab_cache, object, size, + return ____kasan_kmalloc(page->slab_cache, object, size, flags, true); } -void kasan_poison_kfree(void *ptr, unsigned long ip) -{ - struct page *page; - - page = virt_to_head_page(ptr); - - if (unlikely(!PageSlab(page))) { - if (ptr != page_address(page)) { - kasan_report_invalid_free(ptr, ip); - return; - } - kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE); - } else { - __kasan_slab_free(page->slab_cache, ptr, ip, false); - } -} - -void kasan_kfree_large(void *ptr, unsigned long ip) +void __kasan_kfree_large(void *ptr, unsigned long ip) { if (ptr != page_address(virt_to_head_page(ptr))) kasan_report_invalid_free(ptr, ip); - /* The object will be poisoned by page_alloc. */ -} - -#ifndef CONFIG_KASAN_VMALLOC -int kasan_module_alloc(void *addr, size_t size) -{ - void *ret; - size_t scaled_size; - size_t shadow_size; - unsigned long shadow_start; - - shadow_start = (unsigned long)kasan_mem_to_shadow(addr); - scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; - shadow_size = round_up(scaled_size, PAGE_SIZE); - - if (WARN_ON(!PAGE_ALIGNED(shadow_start))) - return -EINVAL; - - ret = __vmalloc_node_range(shadow_size, 1, shadow_start, - shadow_start + shadow_size, - GFP_KERNEL, - PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, - __builtin_return_address(0)); - - if (ret) { - __memset(ret, KASAN_SHADOW_INIT, shadow_size); - find_vm_area(addr)->flags |= VM_KASAN; - kmemleak_ignore(ret); - return 0; - } - - return -ENOMEM; -} - -void kasan_free_shadow(const struct vm_struct *vm) -{ - if (vm->flags & VM_KASAN) - vfree(kasan_mem_to_shadow(vm->addr)); -} -#endif - -#ifdef CONFIG_MEMORY_HOTPLUG -static bool shadow_mapped(unsigned long addr) -{ - pgd_t *pgd = pgd_offset_k(addr); - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - if (pgd_none(*pgd)) - return false; - p4d = p4d_offset(pgd, addr); - if (p4d_none(*p4d)) - return false; - pud = pud_offset(p4d, addr); - if (pud_none(*pud)) - return false; - - /* - * We can't use pud_large() or pud_huge(), the first one is - * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse - * pud_bad(), if pud is bad then it's bad because it's huge. - */ - if (pud_bad(*pud)) - return true; - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) - return false; - - if (pmd_bad(*pmd)) - return true; - pte = pte_offset_kernel(pmd, addr); - return !pte_none(*pte); -} - -static int __meminit kasan_mem_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct memory_notify *mem_data = data; - unsigned long nr_shadow_pages, start_kaddr, shadow_start; - unsigned long shadow_end, shadow_size; - - nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT; - start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn); - shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr); - shadow_size = nr_shadow_pages << PAGE_SHIFT; - shadow_end = shadow_start + shadow_size; - - if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZ |
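For reference, the redzone sizing that the reworked __kasan_cache_create() pads caches up to is the optimal_redzone() ladder kept in the hunk above (the first two and last rungs are visible there; the full ladder matches optimal_redzone() in mm/kasan/common.c). A standalone sketch showing the sizes it produces; userspace code for illustration, the main() harness is not kernel code.

#include <stdio.h>

/* Redzone ladder as in optimal_redzone() in mm/kasan/common.c. */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return object_size <= 64 - 16 ? 16 :
	       object_size <= 128 - 32 ? 32 :
	       object_size <= 512 - 64 ? 64 :
	       object_size <= 4096 - 128 ? 128 :
	       object_size <= (1 << 14) - 256 ? 256 :
	       object_size <= (1 << 15) - 512 ? 512 :
	       object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	unsigned int sizes[] = { 32, 64, 128, 4096 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int s = sizes[i];

		printf("object %5u -> redzone %4u -> total %5u\n",
		       s, optimal_redzone(s), s + optimal_redzone(s));
	}
	return 0;
}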