Diffstat (limited to 'mm/kasan/common.c')
-rw-r--r--  mm/kasan/common.c  13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index e3cfe156f693..17b22107f8eb 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -233,6 +233,9 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
 static u8 assign_tag(struct kmem_cache *cache, const void *object,
 			bool init, bool keep_tag)
 {
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		return 0xff;
+
 	/*
 	 * 1. When an object is kmalloc()'ed, two hooks are called:
 	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
@@ -275,8 +278,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
 	}
 
-	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
-		object = set_tag(object, assign_tag(cache, object, true, false));
+	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
+	object = set_tag(object, assign_tag(cache, object, true, false));
 
 	return (void *)object;
 }
@@ -360,7 +363,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
-	u8 tag = 0xff;
+	u8 tag;
 
 	if (gfpflags_allow_blocking(flags))
 		quarantine_reduce();
@@ -372,9 +375,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 				KASAN_GRANULE_SIZE);
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_GRANULE_SIZE);
-
-	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
-		tag = assign_tag(cache, object, false, keep_tag);
+	tag = assign_tag(cache, object, false, keep_tag);
 
 	/* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
 	unpoison_range(set_tag(object, tag), size);
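
The cleanup leans on set_tag() collapsing to a plain pointer pass-through when neither CONFIG_KASAN_SW_TAGS nor CONFIG_KASAN_HW_TAGS is enabled, and on assign_tag() now returning the canonical 0xff tag early for generic KASAN, which is what lets the IS_ENABLED() guards at the call sites go away. Below is a minimal user-space sketch of that control flow; the simplified signatures and the IS_ENABLED()/Kconfig stand-ins are assumptions for illustration, not the kernel's actual helpers:

/*
 * Standalone user-space sketch (not kernel code): the config macro,
 * IS_ENABLED() stand-in, and simplified assign_tag() signature are
 * illustrative assumptions, not the real mm/kasan interfaces.
 */
#include <stdio.h>
#include <stdint.h>

#define CONFIG_KASAN_GENERIC 1		/* assume a generic-mode build */
#define IS_ENABLED(option) (option)	/* stand-in for the Kconfig macro */

#define KASAN_TAG_KERNEL 0xff		/* canonical "untagged" value */

/*
 * Without CONFIG_KASAN_SW/HW_TAGS, set_tag() ignores the tag and returns
 * the pointer unchanged, so callers may invoke it unconditionally.
 */
static void *set_tag(void *addr, uint8_t tag)
{
	(void)tag;
	return addr;
}

/*
 * Mirrors the shape of the patched assign_tag(): one early return for
 * generic KASAN replaces IS_ENABLED() guards at every call site.
 */
static uint8_t assign_tag(void)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return KASAN_TAG_KERNEL;
	/* ...SW/HW tag-mode assignment logic would live here... */
	return 0x42;
}

int main(void)
{
	char object[32];
	uint8_t tag = assign_tag();
	void *tagged = set_tag(object, tag);

	/* In generic mode: tag is 0xff and the pointer is untouched. */
	printf("tag=0x%02x, pointer unchanged: %s\n",
	       tag, tagged == (void *)object ? "yes" : "no");
	return 0;
}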