Skip to content

Commit 52b4b95

Browse files
0x7f454c46 authored and torvalds committed
mm: slab: free kmem_cache_node after destroy sysfs file
When slub_debug alloc_calls_show is enabled we will try to track location and user of slab object on each online node, kmem_cache_node structure and cpu_cache/cpu_slub shouldn't be freed till there is the last reference to sysfs file. This fixes the following panic: BUG: unable to handle kernel NULL pointer dereference at 0000000000000020 IP: list_locations+0x169/0x4e0 PGD 257304067 PUD 438456067 PMD 0 Oops: 0000 [#1] SMP CPU: 3 PID: 973074 Comm: cat ve: 0 Not tainted 3.10.0-229.7.2.ovz.9.30-00007-japdoll-dirty #2 9.30 Hardware name: DEPO Computers To Be Filled By O.E.M./H67DE3, BIOS L1.60c 07/14/2011 task: ffff88042a5dc5b0 ti: ffff88037f8d8000 task.ti: ffff88037f8d8000 RIP: list_locations+0x169/0x4e0 Call Trace: alloc_calls_show+0x1d/0x30 slab_attr_show+0x1b/0x30 sysfs_read_file+0x9a/0x1a0 vfs_read+0x9c/0x170 SyS_read+0x58/0xb0 system_call_fastpath+0x16/0x1b Code: 5e 07 12 00 b9 00 04 00 00 3d 00 04 00 00 0f 4f c1 3d 00 04 00 00 89 45 b0 0f 84 c3 00 00 00 48 63 45 b0 49 8b 9c c4 f8 00 00 00 <48> 8b 43 20 48 85 c0 74 b6 48 89 df e8 46 37 44 00 48 8b 53 10 CR2: 0000000000000020 Separated __kmem_cache_release from __kmem_cache_shutdown, which is now called on slab_kmem_cache_release (after the last reference to the sysfs file object has dropped). Reintroduced locking in free_partial as a sysfs file might access the cache's partial list after shutdown - a partial revert of commit 69cb8e6 ("slub: free slabs without holding locks"). 
Zap __remove_partial and use remove_partial (w/o underscores) as free_partial now takes list_lock, which is a partial revert of commit 1e4dd94 ("slub: do not assert not having lock in removing freed partial") Signed-off-by: Dmitry Safonov <[email protected]> Suggested-by: Vladimir Davydov <[email protected]> Acked-by: Vladimir Davydov <[email protected]> Cc: Christoph Lameter <[email protected]> Cc: Pekka Enberg <[email protected]> Cc: David Rientjes <[email protected]> Cc: Joonsoo Kim <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 1ac0b6d commit 52b4b95

File tree

5 files changed

+29
-27
lines changed

5 files changed

+29
-27
lines changed

mm/slab.c

+6-6
Original file line numberDiff line numberDiff line change
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
22752275

22762276
err = setup_cpu_cache(cachep, gfp);
22772277
if (err) {
2278-
__kmem_cache_shutdown(cachep);
2278+
__kmem_cache_release(cachep);
22792279
return err;
22802280
}
22812281

@@ -2413,13 +2413,14 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
24132413
}
24142414

24152415
int __kmem_cache_shutdown(struct kmem_cache *cachep)
2416+
{
2417+
return __kmem_cache_shrink(cachep, false);
2418+
}
2419+
2420+
void __kmem_cache_release(struct kmem_cache *cachep)
24162421
{
24172422
int i;
24182423
struct kmem_cache_node *n;
2419-
int rc = __kmem_cache_shrink(cachep, false);
2420-
2421-
if (rc)
2422-
return rc;
24232424

24242425
free_percpu(cachep->cpu_cache);
24252426

@@ -2430,7 +2431,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
24302431
kfree(n);
24312432
cachep->node[i] = NULL;
24322433
}
2433-
return 0;
24342434
}
24352435

24362436
/*

mm/slab.h

+1
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
140140
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
141141

142142
int __kmem_cache_shutdown(struct kmem_cache *);
143+
void __kmem_cache_release(struct kmem_cache *);
143144
int __kmem_cache_shrink(struct kmem_cache *, bool);
144145
void slab_kmem_cache_release(struct kmem_cache *);
145146

mm/slab_common.c

+1
Original file line numberDiff line numberDiff line change
@@ -693,6 +693,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
693693

694694
void slab_kmem_cache_release(struct kmem_cache *s)
695695
{
696+
__kmem_cache_release(s);
696697
destroy_memcg_params(s);
697698
kfree_const(s->name);
698699
kmem_cache_free(kmem_cache, s);

mm/slob.c

+4
Original file line numberDiff line numberDiff line change
@@ -630,6 +630,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
630630
return 0;
631631
}
632632

633+
void __kmem_cache_release(struct kmem_cache *c)
634+
{
635+
}
636+
633637
int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
634638
{
635639
return 0;

mm/slub.c

+17-21
Original file line numberDiff line numberDiff line change
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
15921592
__add_partial(n, page, tail);
15931593
}
15941594

1595-
static inline void
1596-
__remove_partial(struct kmem_cache_node *n, struct page *page)
1597-
{
1598-
list_del(&page->lru);
1599-
n->nr_partial--;
1600-
}
1601-
16021595
static inline void remove_partial(struct kmem_cache_node *n,
16031596
struct page *page)
16041597
{
16051598
lockdep_assert_held(&n->list_lock);
1606-
__remove_partial(n, page);
1599+
list_del(&page->lru);
1600+
n->nr_partial--;
16071601
}
16081602

16091603
/*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
31843178
}
31853179
}
31863180

3181+
void __kmem_cache_release(struct kmem_cache *s)
3182+
{
3183+
free_percpu(s->cpu_slab);
3184+
free_kmem_cache_nodes(s);
3185+
}
3186+
31873187
static int init_kmem_cache_nodes(struct kmem_cache *s)
31883188
{
31893189
int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
34433443

34443444
/*
34453445
* Attempt to free all partial slabs on a node.
3446-
* This is called from kmem_cache_close(). We must be the last thread
3447-
* using the cache and therefore we do not need to lock anymore.
3446+
* This is called from __kmem_cache_shutdown(). We must take list_lock
3447+
* because sysfs file might still access partial list after the shutdowning.
34483448
*/
34493449
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
34503450
{
34513451
struct page *page, *h;
34523452

3453+
BUG_ON(irqs_disabled());
3454+
spin_lock_irq(&n->list_lock);
34533455
list_for_each_entry_safe(page, h, &n->partial, lru) {
34543456
if (!page->inuse) {
3455-
__remove_partial(n, page);
3457+
remove_partial(n, page);
34563458
discard_slab(s, page);
34573459
} else {
34583460
list_slab_objects(s, page,
3459-
"Objects remaining in %s on kmem_cache_close()");
3461+
"Objects remaining in %s on __kmem_cache_shutdown()");
34603462
}
34613463
}
3464+
spin_unlock_irq(&n->list_lock);
34623465
}
34633466

34643467
/*
34653468
* Release all resources used by a slab cache.
34663469
*/
3467-
static inline int kmem_cache_close(struct kmem_cache *s)
3470+
int __kmem_cache_shutdown(struct kmem_cache *s)
34683471
{
34693472
int node;
34703473
struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
34763479
if (n->nr_partial || slabs_node(s, node))
34773480
return 1;
34783481
}
3479-
free_percpu(s->cpu_slab);
3480-
free_kmem_cache_nodes(s);
34813482
return 0;
34823483
}
34833484

3484-
int __kmem_cache_shutdown(struct kmem_cache *s)
3485-
{
3486-
return kmem_cache_close(s);
3487-
}
3488-
34893485
/********************************************************************
34903486
* Kmalloc subsystem
34913487
*******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
39803976
memcg_propagate_slab_attrs(s);
39813977
err = sysfs_slab_add(s);
39823978
if (err)
3983-
kmem_cache_close(s);
3979+
__kmem_cache_release(s);
39843980

39853981
return err;
39863982
}

0 commit comments

Comments
 (0)