
Commit 0cc9fdb

xairy authored and akpm00 committed
kasan: introduce poison_kmalloc_large_redzone
Split out a poison_kmalloc_large_redzone helper from __kasan_kmalloc_large and use it in the caller's code.

This is a preparatory change for the following patches in this series.

Link: https://lkml.kernel.org/r/93317097b668519d76097fb065201b2027436e22.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Cc: Alexander Lobakin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Breno Leitao <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Evgenii Stepanov <[email protected]>
Cc: Marco Elver <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent ce37eec commit 0cc9fdb

1 file changed: +23 −18 lines changed


mm/kasan/common.c

Lines changed: 23 additions & 18 deletions
@@ -363,23 +363,12 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object
 }
 EXPORT_SYMBOL(__kasan_kmalloc);
 
-void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
                                                 gfp_t flags)
 {
         unsigned long redzone_start;
         unsigned long redzone_end;
 
-        if (gfpflags_allow_blocking(flags))
-                kasan_quarantine_reduce();
-
-        if (unlikely(ptr == NULL))
-                return NULL;
-
-        /*
-         * The object has already been unpoisoned by kasan_unpoison_pages() for
-         * alloc_pages() or by kasan_krealloc() for krealloc().
-         */
-
         /*
          * The redzone has byte-level precision for the generic mode.
          * Partially poison the last object granule to cover the unaligned
@@ -389,19 +378,35 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
         kasan_poison_last_granule(ptr, size);
 
         /* Poison the aligned part of the redzone. */
-        redzone_start = round_up((unsigned long)(ptr + size),
-                                 KASAN_GRANULE_SIZE);
+        redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
         redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
         kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                      KASAN_PAGE_REDZONE, false);
+}
 
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+                                          gfp_t flags)
+{
+        if (gfpflags_allow_blocking(flags))
+                kasan_quarantine_reduce();
+
+        if (unlikely(ptr == NULL))
+                return NULL;
+
+        /* The object has already been unpoisoned by kasan_unpoison_pages(). */
+        poison_kmalloc_large_redzone(ptr, size, flags);
+
+        /* Keep the tag that was set by alloc_pages(). */
         return (void *)ptr;
 }
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
         struct slab *slab;
 
+        if (gfpflags_allow_blocking(flags))
+                kasan_quarantine_reduce();
+
         if (unlikely(object == ZERO_SIZE_PTR))
                 return (void *)object;
 
@@ -419,11 +424,11 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 
         /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
         if (unlikely(!slab))
-                return __kasan_kmalloc_large(object, size, flags);
-        else {
+                poison_kmalloc_large_redzone(object, size, flags);
+        else
                 poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
-                return (void *)object;
-        }
+
+        return (void *)object;
 }
 
 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
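
To make the end state easier to read than the three hunks above, here is a sketch of how __kasan_krealloc() looks with this patch applied, assembled from the added and context lines of the diff. The unchanged code between the second and third hunks, which (per the comment removed in the first hunk) unpoisons the object and also resolves slab from the pointer, is only summarized in a comment here, so treat this as an illustration rather than a verbatim excerpt of mm/kasan/common.c.

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct slab *slab;

        /*
         * Drain the quarantine here: poison_kmalloc_large_redzone() no longer
         * does it, unlike the old __kasan_kmalloc_large() path that krealloc
         * previously called.
         */
        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        /*
         * ... unchanged code elided: the object is unpoisoned here and
         * 'slab' is looked up from the pointer ...
         */

        /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
        if (unlikely(!slab))
                poison_kmalloc_large_redzone(object, size, flags);
        else
                poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

        return (void *)object;
}

__kasan_kmalloc_large() keeps its own quarantine and NULL checks and now simply calls poison_kmalloc_large_redzone() before returning the pointer with the tag set by alloc_pages(), so both callers share the redzone-poisoning logic.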
