Diffstat (limited to 'mm')
-rw-r--r--  mm/kasan/common.c  40
-rw-r--r--  mm/mempool.c        2
2 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ae0130cf9de3..d0f8d7a955cd 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -331,6 +331,29 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
	return ____kasan_slab_free(cache, object, ip, true);
}

+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!PageSlab(page))) {
+		if (ptr != page_address(page)) {
+			kasan_report_invalid_free(ptr, ip);
+			return;
+		}
+		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+	} else {
+		____kasan_slab_free(page->slab_cache, ptr, ip, false);
+	}
+}
+
static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
@@ -422,23 +445,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
					flags, true);
}

-void __kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	struct page *page;
-
-	page = virt_to_head_page(ptr);
-
-	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
-			kasan_report_invalid_free(ptr, ip);
-			return;
-		}
-		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
-	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false);
-	}
-}
-
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
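Note (not part of the commit): the PageSlab() test in __kasan_slab_free_mempool() above is the key subtlety; as the comment says, a kmem_cache- or kmalloc-backed mempool can still hand out page_alloc-backed memory once the requested size outgrows the kmalloc caches. A minimal sketch of the same discrimination, using only virt_to_head_page() and PageSlab(); the helper name is hypothetical:

/*
 * Sketch only, mirroring the check in __kasan_slab_free_mempool():
 * a large kmalloc() request falls back to the page allocator, so the
 * returned pointer is page-backed rather than slab-backed, and
 * page->slab_cache must not be dereferenced for it.
 */
#include <linux/mm.h>

static bool ptr_is_slab_backed(const void *ptr)	/* hypothetical name */
{
	struct page *page = virt_to_head_page(ptr);

	return PageSlab(page);
}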
diff --git a/mm/mempool.c b/mm/mempool.c
index 583a9865b181..624ed51b060f 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
+		kasan_slab_free_mempool(element, _RET_IP_);
	else if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}
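Note (not part of the commit): for context, a hedged usage sketch of the path that now reaches kasan_slab_free_mempool(). A kmalloc-backed mempool poisons every element parked in its reserve via add_element() -> kasan_poison_element(), both when mempool_create_kmalloc_pool() pre-fills the reserve and when mempool_free() refills a depleted one. The module name and the 128-byte element size below are illustrative assumptions:

/*
 * Hedged sketch of a kmalloc-backed mempool exercising the renamed
 * hook; not part of the patch.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *demo_pool;

static int __init mempool_kasan_demo_init(void)
{
	void *elem;

	/* Pre-fills a reserve of 4 elements; each one is KASAN-poisoned. */
	demo_pool = mempool_create_kmalloc_pool(4, 128);
	if (!demo_pool)
		return -ENOMEM;

	elem = mempool_alloc(demo_pool, GFP_KERNEL);
	if (!elem)
		return 0;

	/*
	 * Returned to the reserve (and re-poisoned through
	 * kasan_poison_element()) only if the reserve is below min_nr;
	 * otherwise the element is simply kfreed.
	 */
	mempool_free(elem, demo_pool);
	return 0;
}

static void __exit mempool_kasan_demo_exit(void)
{
	mempool_destroy(demo_pool);
}

module_init(mempool_kasan_demo_init);
module_exit(mempool_kasan_demo_exit);
MODULE_LICENSE("GPL");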