Skip to content

Commit 7a2e823

Browse files
MrVan authored and gregkh committed
mm, slub: avoid zeroing kmalloc redzone
commit 59090e4 upstream. Since commit 946fa0d ("mm/slub: extend redzone check to extra allocated kmalloc space than requested"), setting orig_size treats the wasted space (object_size - orig_size) as a redzone. However with init_on_free=1 we clear the full object->size, including the redzone. Additionally we clear the object metadata, including the stored orig_size, making it zero, which makes check_object() treat the whole object as a redzone. These issues lead to the following BUG report with "slub_debug=FUZ init_on_free=1": [ 0.000000] ============================================================================= [ 0.000000] BUG kmalloc-8 (Not tainted): kmalloc Redzone overwritten [ 0.000000] ----------------------------------------------------------------------------- [ 0.000000] [ 0.000000] 0xffff000010032858-0xffff00001003285f @offset=2136. First byte 0x0 instead of 0xcc [ 0.000000] FIX kmalloc-8: Restoring kmalloc Redzone 0xffff000010032858-0xffff00001003285f=0xcc [ 0.000000] Slab 0xfffffdffc0400c80 objects=36 used=23 fp=0xffff000010032a18 flags=0x3fffe0000000200(workingset|node=0|zone=0|lastcpupid=0x1ffff) [ 0.000000] Object 0xffff000010032858 @offset=2136 fp=0xffff0000100328c8 [ 0.000000] [ 0.000000] Redzone ffff000010032850: cc cc cc cc cc cc cc cc ........ [ 0.000000] Object ffff000010032858: cc cc cc cc cc cc cc cc ........ [ 0.000000] Redzone ffff000010032860: cc cc cc cc cc cc cc cc ........ [ 0.000000] Padding ffff0000100328b4: 00 00 00 00 00 00 00 00 00 00 00 00 ............ 
[ 0.000000] CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted 6.11.0-rc3-next-20240814-00004-g61844c55c3f4 torvalds#144 [ 0.000000] Hardware name: NXP i.MX95 19X19 board (DT) [ 0.000000] Call trace: [ 0.000000] dump_backtrace+0x90/0xe8 [ 0.000000] show_stack+0x18/0x24 [ 0.000000] dump_stack_lvl+0x74/0x8c [ 0.000000] dump_stack+0x18/0x24 [ 0.000000] print_trailer+0x150/0x218 [ 0.000000] check_object+0xe4/0x454 [ 0.000000] free_to_partial_list+0x2f8/0x5ec To address the issue, use orig_size to clear the used area, and restore the value of orig_size after clearing the remaining area. When CONFIG_SLUB_DEBUG is not defined, get_orig_size() directly returns s->object_size. So when using memset to init the area, the size can simply be orig_size, as get_orig_size() returns object_size when CONFIG_SLUB_DEBUG is not enabled, and orig_size can never be bigger than object_size. Fixes: 946fa0d ("mm/slub: extend redzone check to extra allocated kmalloc space than requested") Cc: <[email protected]> Reviewed-by: Feng Tang <[email protected]> Acked-by: David Rientjes <[email protected]> Signed-off-by: Peng Fan <[email protected]> Signed-off-by: Vlastimil Babka <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent b883182 commit 7a2e823

File tree

1 file changed

+53
-47
lines changed

1 file changed

+53
-47
lines changed

mm/slub.c

+53-47
Original file line numberDiff line numberDiff line change
@@ -756,6 +756,50 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
756756
return false;
757757
}
758758

759+
/*
760+
* kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
761+
* family will round up the real request size to these fixed ones, so
762+
* there could be an extra area than what is requested. Save the original
763+
* request size in the meta data area, for better debug and sanity check.
764+
*/
765+
static inline void set_orig_size(struct kmem_cache *s,
766+
void *object, unsigned int orig_size)
767+
{
768+
void *p = kasan_reset_tag(object);
769+
unsigned int kasan_meta_size;
770+
771+
if (!slub_debug_orig_size(s))
772+
return;
773+
774+
/*
775+
* KASAN can save its free meta data inside of the object at offset 0.
776+
* If this meta data size is larger than 'orig_size', it will overlap
777+
* the data redzone in [orig_size+1, object_size]. Thus, we adjust
778+
* 'orig_size' to be as at least as big as KASAN's meta data.
779+
*/
780+
kasan_meta_size = kasan_metadata_size(s, true);
781+
if (kasan_meta_size > orig_size)
782+
orig_size = kasan_meta_size;
783+
784+
p += get_info_end(s);
785+
p += sizeof(struct track) * 2;
786+
787+
*(unsigned int *)p = orig_size;
788+
}
789+
790+
static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
791+
{
792+
void *p = kasan_reset_tag(object);
793+
794+
if (!slub_debug_orig_size(s))
795+
return s->object_size;
796+
797+
p += get_info_end(s);
798+
p += sizeof(struct track) * 2;
799+
800+
return *(unsigned int *)p;
801+
}
802+
759803
#ifdef CONFIG_SLUB_DEBUG
760804
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
761805
static DEFINE_SPINLOCK(object_map_lock);
@@ -969,50 +1013,6 @@ static void print_slab_info(const struct slab *slab)
9691013
folio_flags(folio, 0));
9701014
}
9711015

972-
/*
973-
* kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
974-
* family will round up the real request size to these fixed ones, so
975-
* there could be an extra area than what is requested. Save the original
976-
* request size in the meta data area, for better debug and sanity check.
977-
*/
978-
static inline void set_orig_size(struct kmem_cache *s,
979-
void *object, unsigned int orig_size)
980-
{
981-
void *p = kasan_reset_tag(object);
982-
unsigned int kasan_meta_size;
983-
984-
if (!slub_debug_orig_size(s))
985-
return;
986-
987-
/*
988-
* KASAN can save its free meta data inside of the object at offset 0.
989-
* If this meta data size is larger than 'orig_size', it will overlap
990-
* the data redzone in [orig_size+1, object_size]. Thus, we adjust
991-
* 'orig_size' to be as at least as big as KASAN's meta data.
992-
*/
993-
kasan_meta_size = kasan_metadata_size(s, true);
994-
if (kasan_meta_size > orig_size)
995-
orig_size = kasan_meta_size;
996-
997-
p += get_info_end(s);
998-
p += sizeof(struct track) * 2;
999-
1000-
*(unsigned int *)p = orig_size;
1001-
}
1002-
1003-
static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
1004-
{
1005-
void *p = kasan_reset_tag(object);
1006-
1007-
if (!slub_debug_orig_size(s))
1008-
return s->object_size;
1009-
1010-
p += get_info_end(s);
1011-
p += sizeof(struct track) * 2;
1012-
1013-
return *(unsigned int *)p;
1014-
}
1015-
10161016
void skip_orig_size_check(struct kmem_cache *s, const void *object)
10171017
{
10181018
set_orig_size(s, (void *)object, s->object_size);
@@ -1859,7 +1859,6 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
18591859
int objects) {}
18601860
static inline void dec_slabs_node(struct kmem_cache *s, int node,
18611861
int objects) {}
1862-
18631862
#ifndef CONFIG_SLUB_TINY
18641863
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
18651864
void **freelist, void *nextfree)
@@ -2187,14 +2186,21 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
21872186
*/
21882187
if (unlikely(init)) {
21892188
int rsize;
2190-
unsigned int inuse;
2189+
unsigned int inuse, orig_size;
21912190

21922191
inuse = get_info_end(s);
2192+
orig_size = get_orig_size(s, x);
21932193
if (!kasan_has_integrated_init())
2194-
memset(kasan_reset_tag(x), 0, s->object_size);
2194+
memset(kasan_reset_tag(x), 0, orig_size);
21952195
rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
21962196
memset((char *)kasan_reset_tag(x) + inuse, 0,
21972197
s->size - inuse - rsize);
2198+
/*
2199+
* Restore orig_size, otherwise a kmalloc redzone overwrite
2200+
* would be reported
2201+
*/
2202+
set_orig_size(s, x, orig_size);
2203+
21982204
}
21992205
/* KASAN might put x into memory quarantine, delaying its reuse. */
22002206
return !kasan_slab_free(s, x, init);

0 commit comments

Comments
 (0)