Commit 5a50508

Authored by Ingo Molnar, committed by Mel Gorman
mm/rmap: Convert the struct anon_vma::mutex to an rwsem
Convert the struct anon_vma::mutex to an rwsem, which will help in
solving a page-migration scalability problem. (Addressed in a separate
patch.)

The conversion is simple and straightforward: in every case where we
mutex_lock()ed, we now down_write().

Suggested-by: Linus Torvalds <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Turner <[email protected]>
Cc: Lee Schermerhorn <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Hugh Dickins <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Mel Gorman <[email protected]>
1 parent d28d433 commit 5a50508
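For readers comparing the two APIs, a minimal illustrative sketch of the call mapping the patch applies (not part of the commit; the "example" names are hypothetical, the locking calls are the real kernel API):

        #include <linux/rwsem.h>

        struct example {
                struct rw_semaphore rwsem;      /* was: struct mutex mutex */
        };

        static void example_init(struct example *e)
        {
                init_rwsem(&e->rwsem);          /* was: mutex_init(&e->mutex) */
        }

        static void example_modify(struct example *e)
        {
                down_write(&e->rwsem);          /* was: mutex_lock(&e->mutex) */
                /* ... exclusive section, exactly as with the mutex ... */
                up_write(&e->rwsem);            /* was: mutex_unlock(&e->mutex) */
        }

A write-locked rwsem behaves like the mutex it replaces; the scalability gain comes in the follow-up patch, when list walkers can take the lock with down_read() and proceed in parallel.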

File tree: 4 files changed (+25 −25 lines)

include/linux/rmap.h (+8 −8)

@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/memcontrol.h>
 
 /*
@@ -25,8 +25,8 @@
  * pointing to this anon_vma once its vma list is empty.
  */
 struct anon_vma {
-	struct anon_vma *root;	/* Root of this anon_vma tree */
-	struct mutex mutex;	/* Serialize access to vma list */
+	struct anon_vma *root;		/* Root of this anon_vma tree */
+	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
 	/*
 	 * The refcount is taken on an anon_vma when there is no
 	 * guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
 	struct vm_area_struct *vma;
 	struct anon_vma *anon_vma;
 	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
-	struct rb_node rb;		/* locked by anon_vma->mutex */
+	struct rb_node rb;		/* locked by anon_vma->rwsem */
 	unsigned long rb_subtree_last;
 #ifdef CONFIG_DEBUG_VM_RB
 	unsigned long cached_vma_start, cached_vma_last;
@@ -108,24 +108,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		mutex_lock(&anon_vma->root->mutex);
+		down_write(&anon_vma->root->rwsem);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		mutex_unlock(&anon_vma->root->mutex);
+		up_write(&anon_vma->root->rwsem);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-	mutex_lock(&anon_vma->root->mutex);
+	down_write(&anon_vma->root->rwsem);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-	mutex_unlock(&anon_vma->root->mutex);
+	up_write(&anon_vma->root->rwsem);
 }
 
 /*
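The new struct comment ("W: modification, R: walking the list") anticipates the read-side use. A hypothetical walker under the follow-up patch's scheme (illustrative only; this commit itself still takes the lock exclusively everywhere):

        static void example_walk(struct anon_vma *anon_vma)
        {
                down_read(&anon_vma->root->rwsem);      /* many walkers may hold this */
                /* ... walk the anon_vma interval tree / chain list ... */
                up_read(&anon_vma->root->rwsem);
        }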

mm/huge_memory.c (+2 −2)

@@ -1292,7 +1292,7 @@ static int __split_huge_page_splitting(struct page *page,
 	 * We can't temporarily set the pmd to null in order
 	 * to split it, the pmd must remain marked huge at all
 	 * times or the VM won't take the pmd_trans_huge paths
-	 * and it won't wait on the anon_vma->root->mutex to
+	 * and it won't wait on the anon_vma->root->rwsem to
 	 * serialize against split_huge_page*.
 	 */
 	pmdp_splitting_flush(vma, address, pmd);
@@ -1495,7 +1495,7 @@ static int __split_huge_page_map(struct page *page,
 	return ret;
 }
 
-/* must be called with anon_vma->root->mutex hold */
+/* must be called with anon_vma->root->rwsem held */
 static void __split_huge_page(struct page *page,
 			      struct anon_vma *anon_vma)
 {

mm/mmap.c (+4 −4)

@@ -2561,15 +2561,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
+		down_write(&anon_vma->root->rwsem);
 		/*
 		 * We can safely modify head.next after taking the
-		 * anon_vma->root->mutex. If some other vma in this mm shares
+		 * anon_vma->root->rwsem. If some other vma in this mm shares
 		 * the same anon_vma we won't take it again.
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us thanks to the
-		 * anon_vma->root->mutex.
+		 * anon_vma->root->rwsem.
 		 */
 		if (__test_and_set_bit(0, (unsigned long *)
 				       &anon_vma->root->rb_root.rb_node))
@@ -2671,7 +2671,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us until we release the
-		 * anon_vma->root->mutex.
+		 * anon_vma->root->rwsem.
 		 */
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->root->rb_root.rb_node))
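Note that the old mutex_lock_nest_lock() carried a lockdep annotation (each anon_vma lock here nests under mm->mmap_sem) which plain down_write() drops. Later kernels added a down_write_nest_lock() helper to restore it; a sketch of how the annotated call would look (that helper is not available as of this commit, which is why plain down_write() is used above):

        /* hypothetical at this point in the tree: restores the lockdep nesting */
        down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);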

mm/rmap.c (+11 −11)

@@ -24,7 +24,7 @@
  * mm->mmap_sem
  *   page->flags PG_locked (lock_page)
  *     mapping->i_mmap_mutex
- *       anon_vma->mutex
+ *       anon_vma->rwsem
  *         mm->page_table_lock or pte_lock
  *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  *           swap_lock (in swap_duplicate, swap_info_get)
@@ -37,7 +37,7 @@
  *                 in arch-dependent flush_dcache_mmap_lock,
  *                 within bdi.wb->list_lock in __sync_single_inode)
  *
- * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
  *   ->tasklist_lock
  *     pte map lock
  */
@@ -103,7 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 	 * LOCK should suffice since the actual taking of the lock must
 	 * happen _before_ what follows.
 	 */
-	if (mutex_is_locked(&anon_vma->root->mutex)) {
+	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
 		anon_vma_lock(anon_vma);
 		anon_vma_unlock(anon_vma);
 	}
@@ -219,17 +219,17 @@ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
 	struct anon_vma *new_root = anon_vma->root;
 	if (new_root != root) {
 		if (WARN_ON_ONCE(root))
-			mutex_unlock(&root->mutex);
+			up_write(&root->rwsem);
 		root = new_root;
-		mutex_lock(&root->mutex);
+		down_write(&root->rwsem);
 	}
 	return root;
 }
 
 static inline void unlock_anon_vma_root(struct anon_vma *root)
 {
 	if (root)
-		mutex_unlock(&root->mutex);
+		up_write(&root->rwsem);
 }
 
 /*
@@ -349,7 +349,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
 	/*
 	 * Iterate the list once more, it now only contains empty and unlinked
 	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
-	 * needing to acquire the anon_vma->root->mutex.
+	 * needing to write-acquire the anon_vma->root->rwsem.
 	 */
 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 		struct anon_vma *anon_vma = avc->anon_vma;
@@ -365,7 +365,7 @@ static void anon_vma_ctor(void *data)
 {
 	struct anon_vma *anon_vma = data;
 
-	mutex_init(&anon_vma->mutex);
+	init_rwsem(&anon_vma->rwsem);
 	atomic_set(&anon_vma->refcount, 0);
 	anon_vma->rb_root = RB_ROOT;
 }
@@ -457,14 +457,14 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 	root_anon_vma = ACCESS_ONCE(anon_vma->root);
-	if (mutex_trylock(&root_anon_vma->mutex)) {
+	if (down_write_trylock(&root_anon_vma->rwsem)) {
 		/*
 		 * If the page is still mapped, then this anon_vma is still
 		 * its anon_vma, and holding the mutex ensures that it will
 		 * not go away, see anon_vma_free().
 		 */
 		if (!page_mapped(page)) {
-			mutex_unlock(&root_anon_vma->mutex);
+			up_write(&root_anon_vma->rwsem);
 			anon_vma = NULL;
 		}
 		goto out;
@@ -1299,7 +1299,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	/*
 	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
 	 * unstable result and race. Plus, We can't wait here because
-	 * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
+	 * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
 	 * if trylock failed, the page remain in evictable lru and later
 	 * vmscan could retry to move the page to unevictable lru if the
 	 * page is actually mlocked.
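The page_lock_anon_vma() hunk keeps the existing trylock-fast-path shape: try the lock opportunistically under RCU and only fall back to the refcount-and-sleep slow path when that fails. Schematically (a condensation of the hunk above; the slow path lies outside the lines shown):

        if (down_write_trylock(&root_anon_vma->rwsem)) {
                /* fast path: got the lock without sleeping, still under RCU */
                if (!page_mapped(page)) {       /* raced with unmap */
                        up_write(&root_anon_vma->rwsem);
                        anon_vma = NULL;
                }
                goto out;
        }
        /* slow path (outside this hunk): pin the anon_vma with a refcount,
         * leave the RCU read section, then block in anon_vma_lock() */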
