Skip to content

Commit 29d7355

Browse files
xairy authored and akpm00 committed
kasan: save alloc stack traces for mempool
Update kasan_mempool_unpoison_object to properly poison the redzone and save alloc stack traces for kmalloc and slab pools. As a part of this change, split out and use an unpoison_slab_object helper function from __kasan_slab_alloc. [[email protected]: mark unpoison_slab_object() as static] Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/05ad235da8347cfe14d496d01b2aaf074b4f607c.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov <[email protected]> Signed-off-by: Nathan Chancellor <[email protected]> Cc: Alexander Lobakin <[email protected]> Cc: Alexander Potapenko <[email protected]> Cc: Andrey Ryabinin <[email protected]> Cc: Breno Leitao <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Evgenii Stepanov <[email protected]> Cc: Marco Elver <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 0cc9fdb commit 29d7355

File tree

2 files changed

+44
-13
lines changed

2 files changed

+44
-13
lines changed

include/linux/kasan.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -303,9 +303,10 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
303303
* mempool).
304304
*
305305
* This function unpoisons a slab allocation that was previously poisoned via
306-
* kasan_mempool_poison_object() without initializing its memory. For the
307-
* tag-based modes, this function does not assign a new tag to the allocation
308-
* and instead restores the original tags based on the pointer value.
306+
* kasan_mempool_poison_object() and saves an alloc stack trace for it without
307+
* initializing the allocation's memory. For the tag-based modes, this function
308+
* does not assign a new tag to the allocation and instead restores the
309+
* original tags based on the pointer value.
309310
*
310311
* This function operates on all slab allocations including large kmalloc
311312
* allocations (the ones returned by kmalloc_large() or by kmalloc() with the

mm/kasan/common.c

Lines changed: 40 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -277,6 +277,20 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
277277
/* The object will be poisoned by kasan_poison_pages(). */
278278
}
279279

280+
static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
281+
gfp_t flags, bool init)
282+
{
283+
/*
284+
* Unpoison the whole object. For kmalloc() allocations,
285+
* poison_kmalloc_redzone() will do precise poisoning.
286+
*/
287+
kasan_unpoison(object, cache->object_size, init);
288+
289+
/* Save alloc info (if possible) for non-kmalloc() allocations. */
290+
if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
291+
kasan_save_alloc_info(cache, object, flags);
292+
}
293+
280294
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
281295
void *object, gfp_t flags, bool init)
282296
{
@@ -299,15 +313,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
299313
tag = assign_tag(cache, object, false);
300314
tagged_object = set_tag(object, tag);
301315

302-
/*
303-
* Unpoison the whole object.
304-
* For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
305-
*/
306-
kasan_unpoison(tagged_object, cache->object_size, init);
307-
308-
/* Save alloc info (if possible) for non-kmalloc() allocations. */
309-
if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
310-
kasan_save_alloc_info(cache, tagged_object, flags);
316+
/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
317+
unpoison_slab_object(cache, tagged_object, flags, init);
311318

312319
return tagged_object;
313320
}
@@ -482,7 +489,30 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
482489

483490
void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
484491
{
485-
kasan_unpoison(ptr, size, false);
492+
struct slab *slab;
493+
gfp_t flags = 0; /* Might be executing under a lock. */
494+
495+
if (is_kfence_address(kasan_reset_tag(ptr)))
496+
return;
497+
498+
slab = virt_to_slab(ptr);
499+
500+
/*
501+
* This function can be called for large kmalloc allocation that get
502+
* their memory from page_alloc.
503+
*/
504+
if (unlikely(!slab)) {
505+
kasan_unpoison(ptr, size, false);
506+
poison_kmalloc_large_redzone(ptr, size, flags);
507+
return;
508+
}
509+
510+
/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
511+
unpoison_slab_object(slab->slab_cache, ptr, size, flags);
512+
513+
/* Poison the redzone and save alloc info for kmalloc() allocations. */
514+
if (is_kmalloc_cache(slab->slab_cache))
515+
poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
486516
}
487517

488518
bool __kasan_check_byte(const void *address, unsigned long ip)

0 commit comments

Comments (0)