From 276940915f232f8569124811fd8a9524f27f5748 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Tue, 27 Aug 2024 04:05:48 +0200
Subject: [PATCH] btrfs: clear defragmented inodes using postorder in btrfs_cleanup_defrag_inodes()

btrfs_cleanup_defrag_inodes() is not called frequently, only in remount
or unmount, but the way it frees the inodes in fs_info->defrag_inodes
is inefficient. Each iteration has to locate the first node, remove it,
and potentially rebalance the tree, until the tree is empty. That
approach does allow a conditional reschedule between iterations.

For cleanups the rbtree_postorder_for_each_entry_safe() iterator is
convenient, but we can't reschedule and restart the iteration because
some of the tree nodes would already be freed. The cleanup operation is
kmem_cache_free(), which will likely take the fast path for most
objects, so rescheduling should not be necessary.

Reviewed-by: Qu Wenruo
Signed-off-by: David Sterba
---
 fs/btrfs/defrag.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 41d67065d02bd0..89f51252d25cf6 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -212,20 +212,14 @@ static struct inode_defrag *btrfs_pick_defrag_inode(
 
 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 {
-	struct inode_defrag *defrag;
-	struct rb_node *node;
+	struct inode_defrag *defrag, *next;
 
 	spin_lock(&fs_info->defrag_inodes_lock);
-	node = rb_first(&fs_info->defrag_inodes);
-	while (node) {
-		rb_erase(node, &fs_info->defrag_inodes);
-		defrag = rb_entry(node, struct inode_defrag, rb_node);
-		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 
-		cond_resched_lock(&fs_info->defrag_inodes_lock);
+	rbtree_postorder_for_each_entry_safe(defrag, next,
+					     &fs_info->defrag_inodes, rb_node)
+		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 
-		node = rb_first(&fs_info->defrag_inodes);
-	}
 	spin_unlock(&fs_info->defrag_inodes_lock);
 }
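
As an illustration of why a postorder walk is safe for freeing (not part
of the patch and not kernel code; the type and function names below are
made up for the sketch): postorder visits both children before the
parent, so by the time a node is freed nothing in the remaining walk
touches it again. Restarting the walk after a reschedule would revisit
already-freed nodes, which is why the loop above runs under the lock for
its whole duration.

/*
 * Minimal userspace sketch of postorder freeing on a plain binary tree.
 * The kernel's rbtree_postorder_for_each_entry_safe() iterator relies on
 * the same child-before-parent ordering, just implemented without
 * recursion.
 */
#include <stdlib.h>

struct node {
	struct node *left;
	struct node *right;
};

static void free_postorder(struct node *n)
{
	if (!n)
		return;
	free_postorder(n->left);	/* free both subtrees first ... */
	free_postorder(n->right);
	free(n);			/* ... then the node itself */
}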