ext4: avoid unnecessary spreading of allocations among groups
author     Jan Kara <jack@suse.cz>
           Thu, 8 Sep 2022 09:21:25 +0000 (11:21 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 28 Sep 2022 09:32:28 +0000 (11:32 +0200)
commit 1940265ede6683f6317cba0d428ce6505eaca944 upstream.

mb_set_largest_free_order() updates the lists containing groups with the
largest chunk of free space of a given order. The way it updates them
always moves the group to the tail of its list. Thus allocations looking
for free space of a given order effectively end up cycling through all
groups (and, due to initialization, in last-to-first order). This
spreads allocations among block groups, which reduces performance on
rotating disks or low-end flash media. Change
mb_set_largest_free_order() to update the lists only if the order of
the largest free chunk in the group has changed.
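
As an illustration, here is a minimal user-space sketch of the fixed
logic. The types, helpers, and names (NUM_ORDERS, struct group,
order_lists, set_largest_free_order) are simplified stand-ins for the
kernel's, and the per-list locking is omitted; it only shows how the
early return keeps a group in place on its order list when its largest
free order is unchanged.

#include <stdio.h>

#define NUM_ORDERS 4

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *n) { n->prev = n->next = n; }

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

struct group {
	int bb_largest_free_order;	/* -1 means uninitialized */
	int bb_counters[NUM_ORDERS];	/* free chunks of each order */
	struct list_node node;		/* membership on one order list */
};

static struct list_node order_lists[NUM_ORDERS];	/* one list per order */

/* Fixed logic: bail out early when the largest order is unchanged. */
static void set_largest_free_order(struct group *grp)
{
	int i;

	for (i = NUM_ORDERS - 1; i >= 0; i--)
		if (grp->bb_counters[i] > 0)
			break;
	if (i == grp->bb_largest_free_order)
		return;		/* no list movement, group keeps its position */
	if (grp->bb_largest_free_order >= 0)
		list_del_init(&grp->node);
	grp->bb_largest_free_order = i;
	if (i >= 0)
		list_add_tail(&grp->node, &order_lists[i]);
}

int main(void)
{
	struct group g = { .bb_largest_free_order = -1 };
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		list_init(&order_lists[i]);
	list_init(&g.node);

	g.bb_counters[2] = 3;
	set_largest_free_order(&g);	/* joins order_lists[2] */
	g.bb_counters[2] = 2;		/* count changed, largest order did not */
	set_largest_free_order(&g);	/* early return: no tail move */
	printf("largest order: %d\n", g.bb_largest_free_order);
	return 0;
}

Before the patch, the del/add pair ran unconditionally, so every call
rotated the group to the tail of the same list even when nothing had
changed; the early return above is the behavioral change the diff below
makes (with the !MB_OPTIMIZE_SCAN case folded into the same early exit).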

Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
CC: stable@kernel.org
Reported-and-tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/all/0d81a7c2-46b7-6010-62a4-3e6cfc1628d6@i2se.com/
Link: https://lore.kernel.org/r/20220908092136.11770-2-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/ext4/mballoc.c

index 7d5e66fded0e55d9531026b4746a4765a3fe5c2a..da84f3456fbb584ab261aab31a853645c3b2a1a7 100644
@@ -1077,23 +1077,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int i;
 
-       if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
+       for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
+               if (grp->bb_counters[i] > 0)
+                       break;
+       /* No need to move between order lists? */
+       if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
+           i == grp->bb_largest_free_order) {
+               grp->bb_largest_free_order = i;
+               return;
+       }
+
+       if (grp->bb_largest_free_order >= 0) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_del_init(&grp->bb_largest_free_order_node);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
-       grp->bb_largest_free_order = -1; /* uninit */
-
-       for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
-               if (grp->bb_counters[i] > 0) {
-                       grp->bb_largest_free_order = i;
-                       break;
-               }
-       }
-       if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
-           grp->bb_largest_free_order >= 0 && grp->bb_free) {
+       grp->bb_largest_free_order = i;
+       if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_add_tail(&grp->bb_largest_free_order_node,
                              &sbi->s_mb_largest_free_orders[
                                              grp->bb_largest_free_order]);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
 }