 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_mutex
- *         anon_vma->mutex
+ *         anon_vma->rwsem
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
@@ -37,7 +37,7 @@
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within bdi.wb->list_lock in __sync_single_inode)
 *
- * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */
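The lock-ordering comment tracks the rename of anon_vma->mutex to anon_vma->rwsem. The matching struct field and lock wrappers live in include/linux/rmap.h and are not part of this excerpt; the sketch below is an assumption of what they look like after the conversion, inferred from the call sites in the hunks that follow, with the write side keeping the old exclusive semantics.

    /* Sketch only: assumed shape of the include/linux/rmap.h side of the change. */
    #include <linux/rwsem.h>

    struct anon_vma {
            struct anon_vma *root;          /* root of this anon_vma tree */
            struct rw_semaphore rwsem;      /* replaces the old struct mutex */
            /* ... refcount, rb_root and the rest are unchanged ... */
    };

    static inline void anon_vma_lock(struct anon_vma *anon_vma)
    {
            /* still exclusive: every conversion in this patch takes the write side */
            down_write(&anon_vma->root->rwsem);
    }

    static inline void anon_vma_unlock(struct anon_vma *anon_vma)
    {
            up_write(&anon_vma->root->rwsem);
    }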
@@ -103,7 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
         * LOCK should suffice since the actual taking of the lock must
         * happen _before_ what follows.
         */
-       if (mutex_is_locked(&anon_vma->root->mutex)) {
+       if (rwsem_is_locked(&anon_vma->root->rwsem)) {
                anon_vma_lock(anon_vma);
                anon_vma_unlock(anon_vma);
        }
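Every hunk applies the same mechanical substitution, replacing each mutex primitive with its write-side rwsem counterpart; here rwsem_is_locked() takes over the role mutex_is_locked() played in the barrier trick against page_lock_anon_vma(). A minimal standalone sketch of the rwsem calls used throughout the patch (hypothetical lock name, not from the kernel source):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(example_sem);      /* hypothetical lock, for illustration */

    static void example_writer(void)
    {
            /* down_write_trylock() returns nonzero on success, like mutex_trylock() */
            if (!down_write_trylock(&example_sem))
                    down_write(&example_sem);       /* sleep for exclusive access */

            /* ... critical section with the same guarantees as the old mutex ... */

            up_write(&example_sem);
    }

    static int example_is_held(void)
    {
            /* rwsem_is_locked() stands in for mutex_is_locked() */
            return rwsem_is_locked(&example_sem);
    }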
@@ -219,17 +219,17 @@ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct
        struct anon_vma *new_root = anon_vma->root;
        if (new_root != root) {
                if (WARN_ON_ONCE(root))
-                       mutex_unlock(&root->mutex);
+                       up_write(&root->rwsem);
                root = new_root;
-               mutex_lock(&root->mutex);
+               down_write(&root->rwsem);
        }
        return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
        if (root)
-               mutex_unlock(&root->mutex);
+               up_write(&root->rwsem);
}

/*
@@ -349,7 +349,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
        /*
         * Iterate the list once more, it now only contains empty and unlinked
         * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
-        * needing to acquire the anon_vma->root->mutex.
+        * needing to write-acquire the anon_vma->root->rwsem.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;
@@ -365,7 +365,7 @@ static void anon_vma_ctor(void *data)
{
        struct anon_vma *anon_vma = data;

-       mutex_init(&anon_vma->mutex);
+       init_rwsem(&anon_vma->rwsem);
        atomic_set(&anon_vma->refcount, 0);
        anon_vma->rb_root = RB_ROOT;
}
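Not part of the patch: init_rwsem() above is the initializer for an rw_semaphore embedded in a dynamically allocated object, replacing mutex_init() one-for-one; a statically allocated semaphore would use DECLARE_RWSEM instead. A brief sketch of the two forms, with hypothetical names:

    #include <linux/rwsem.h>

    /* static object: declaration and initialization in one step */
    static DECLARE_RWSEM(example_static_sem);

    /* embedded in a dynamically allocated object: initialize in the
     * constructor, before any other CPU can reach the object, exactly
     * as anon_vma_ctor() does above. */
    struct example_obj {
            struct rw_semaphore lock;
    };

    static void example_obj_init(struct example_obj *obj)
    {
            init_rwsem(&obj->lock);
    }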
@@ -457,14 +457,14 @@ struct anon_vma *page_lock_anon_vma(struct page *page)

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        root_anon_vma = ACCESS_ONCE(anon_vma->root);
-       if (mutex_trylock(&root_anon_vma->mutex)) {
+       if (down_write_trylock(&root_anon_vma->rwsem)) {
                /*
                 * If the page is still mapped, then this anon_vma is still
                 * its anon_vma, and holding the mutex ensures that it will
                 * not go away, see anon_vma_free().
                 */
                if (!page_mapped(page)) {
-                       mutex_unlock(&root_anon_vma->mutex);
+                       up_write(&root_anon_vma->rwsem);
                        anon_vma = NULL;
                }
                goto out;
@@ -1299,7 +1299,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        /*
         * We need mmap_sem locking, Otherwise VM_LOCKED check makes
         * unstable result and race. Plus, We can't wait here because
-        * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
+        * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
         * if trylock failed, the page remain in evictable lru and later
         * vmscan could retry to move the page to unevictable lru if the
         * page is actually mlocked.
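The context above notes that the caller already holds anon_vma->rwsem or mapping->i_mmap_mutex and therefore must not sleep on mmap_sem; mmap_sem is itself an rw_semaphore, and the trylock the comment alludes to is the usual non-blocking read-side probe. A generic sketch of that pattern with placeholder names, not code taken from the patch:

    /* Generic sketch: probe a lock we must not sleep on while holding others. */
    if (down_read_trylock(&mm->mmap_sem)) {
            /* VM_LOCKED can be checked reliably only while mmap_sem is held */
            if (vma->vm_flags & VM_LOCKED)
                    ret = SWAP_MLOCK;
            up_read(&mm->mmap_sem);
    }
    /* if the trylock failed, leave the page on the evictable LRU; vmscan retries */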