
Commit 69cb8e6

Christoph Lameter authored and penberg committed
slub: free slabs without holding locks
There are two situations in which slub holds a lock while releasing pages:

  A. During kmem_cache_shrink()
  B. During kmem_cache_close()

For A, build a list while holding the lock and then release the pages later. In case of B we are the last remaining user of the slab, so there is no need to take the list_lock.

After this patch all calls to the page allocator to free pages are done without holding any spinlocks. kmem_cache_destroy() will still hold the slub_lock semaphore.

Signed-off-by: Christoph Lameter <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
1 parent 93ee7a9 commit 69cb8e6
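To illustrate case A from the commit message, below is a minimal, self-contained userspace sketch (not kernel code) of the same pattern used in kmem_cache_shrink(): empty entries are detached onto a private list while the spinlock is held, and the actual frees happen only after the lock is dropped. The names here (struct pool, struct node, shrink_pool) are hypothetical stand-ins for kmem_cache_node, its partial list, and discard_slab().

/*
 * Illustrative sketch only, under the assumptions named above; it is not
 * the SLUB implementation.  It shows the "collect under lock, free after
 * unlock" pattern the patch applies.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
    struct node *next;
    int inuse;          /* 0 means the node is empty and may be freed */
    void *payload;
};

struct pool {
    pthread_mutex_t lock;
    struct node *partial;   /* list protected by ->lock */
};

static void shrink_pool(struct pool *p)
{
    struct node *free_list = NULL;  /* private list, needs no lock */
    struct node **pp, *n;

    pthread_mutex_lock(&p->lock);
    /* Detach empty nodes onto the private list while holding the lock. */
    for (pp = &p->partial; (n = *pp) != NULL; ) {
        if (!n->inuse) {
            *pp = n->next;
            n->next = free_list;
            free_list = n;
        } else {
            pp = &n->next;
        }
    }
    pthread_mutex_unlock(&p->lock);

    /* Release memory without holding any lock (mirrors discard_slab()). */
    while ((n = free_list) != NULL) {
        free_list = n->next;
        free(n->payload);
        free(n);
    }
}

int main(void)
{
    struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER, .partial = NULL };
    shrink_pool(&p);    /* no-op on an empty pool */
    return 0;
}

The design point mirrors the patch: the allocator's free path (here plain free()) is never entered while the list lock is held, which keeps lock hold times short and avoids calling back into the page allocator under a spinlock.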

File tree: 1 file changed (+13, −13)

1 file changed

+13
-13
lines changed

mm/slub.c

+13 −13

@@ -2970,13 +2970,13 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
+ * This is called from kmem_cache_close(). We must be the last thread
+ * using the cache and therefore we do not need to lock anymore.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-        unsigned long flags;
         struct page *page, *h;
 
-        spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry_safe(page, h, &n->partial, lru) {
                 if (!page->inuse) {
                         remove_partial(n, page);
@@ -2986,7 +2986,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                                 "Objects remaining on kmem_cache_close()");
                 }
         }
-        spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
 /*
@@ -3020,6 +3019,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
         s->refcount--;
         if (!s->refcount) {
                 list_del(&s->list);
+                up_write(&slub_lock);
                 if (kmem_cache_close(s)) {
                         printk(KERN_ERR "SLUB %s: %s called for cache that "
                                 "still has objects.\n", s->name, __func__);
@@ -3028,8 +3028,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
                 if (s->flags & SLAB_DESTROY_BY_RCU)
                         rcu_barrier();
                 sysfs_slab_remove(s);
-        }
-        up_write(&slub_lock);
+        } else
+                up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -3347,23 +3347,23 @@ int kmem_cache_shrink(struct kmem_cache *s)
                  * list_lock. page->inuse here is the upper limit.
                  */
                 list_for_each_entry_safe(page, t, &n->partial, lru) {
-                        if (!page->inuse) {
-                                remove_partial(n, page);
-                                discard_slab(s, page);
-                        } else {
-                                list_move(&page->lru,
-                                        slabs_by_inuse + page->inuse);
-                        }
+                        list_move(&page->lru, slabs_by_inuse + page->inuse);
+                        if (!page->inuse)
+                                n->nr_partial--;
                 }
 
                 /*
                  * Rebuild the partial list with the slabs filled up most
                  * first and the least used slabs at the end.
                  */
-                for (i = objects - 1; i >= 0; i--)
+                for (i = objects - 1; i > 0; i--)
                         list_splice(slabs_by_inuse + i, n->partial.prev);
 
                 spin_unlock_irqrestore(&n->list_lock, flags);
+
+                /* Release empty slabs */
+                list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+                        discard_slab(s, page);
         }
 
         kfree(slabs_by_inuse);
