btrfs: raid56: reduce indentation in lock_stripe_add
author Johannes Thumshirn <jthumshirn@suse.de>
Fri, 18 Oct 2019 09:58:20 +0000 (11:58 +0200)
committer David Sterba <dsterba@suse.com>
Mon, 18 Nov 2019 11:47:00 +0000 (12:47 +0100)
In lock_stripe_add() we're traversing the stripe hash list and checking
whether the current list element's raid_map is equal to the raid bio's
raid_map. If both are equal we continue processing.

If we check for inequality instead of equality and continue the loop
early, we can reduce one level of indentation.
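
To see the transformation in isolation, here is a minimal sketch of the
early-continue pattern (hypothetical struct and function names, not the
btrfs code):

	/*
	 * Minimal sketch of the early-continue refactoring; the types
	 * and names here are illustrative only.
	 */
	struct item {
		struct item *next;
		int key;
	};

	static void before(struct item *head, int key)
	{
		struct item *cur;

		for (cur = head; cur; cur = cur->next) {
			if (cur->key == key) {
				/* long body, one level deep */
			}
		}
	}

	static void after(struct item *head, int key)
	{
		struct item *cur;

		for (cur = head; cur; cur = cur->next) {
			if (cur->key != key)
				continue;

			/* the same body, one level shallower */
		}
	}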

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/raid56.c

index 8f47a85944eb7883d6c165988ccb1e668b1de173..d3fc55f8846e11c2b5d1e2ad6071a12a1ac150ab 100644
@@ -682,62 +682,59 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 
        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(cur, &h->hash_list, hash_list) {
-               if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
-                       spin_lock(&cur->bio_list_lock);
-
-                       /* can we steal this cached rbio's pages? */
-                       if (bio_list_empty(&cur->bio_list) &&
-                           list_empty(&cur->plug_list) &&
-                           test_bit(RBIO_CACHE_BIT, &cur->flags) &&
-                           !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
-                               list_del_init(&cur->hash_list);
-                               refcount_dec(&cur->refs);
-
-                               steal_rbio(cur, rbio);
-                               cache_drop = cur;
-                               spin_unlock(&cur->bio_list_lock);
+               if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
+                       continue;
 
-                               goto lockit;
-                       }
+               spin_lock(&cur->bio_list_lock);
 
-                       /* can we merge into the lock owner? */
-                       if (rbio_can_merge(cur, rbio)) {
-                               merge_rbio(cur, rbio);
-                               spin_unlock(&cur->bio_list_lock);
-                               freeit = rbio;
-                               ret = 1;
-                               goto out;
-                       }
+               /* Can we steal this cached rbio's pages? */
+               if (bio_list_empty(&cur->bio_list) &&
+                   list_empty(&cur->plug_list) &&
+                   test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+                   !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
+                       list_del_init(&cur->hash_list);
+                       refcount_dec(&cur->refs);
 
+                       steal_rbio(cur, rbio);
+                       cache_drop = cur;
+                       spin_unlock(&cur->bio_list_lock);
 
-                       /*
-                        * we couldn't merge with the running
-                        * rbio, see if we can merge with the
-                        * pending ones.  We don't have to
-                        * check for rmw_locked because there
-                        * is no way they are inside finish_rmw
-                        * right now
-                        */
-                       list_for_each_entry(pending, &cur->plug_list,
-                                           plug_list) {
-                               if (rbio_can_merge(pending, rbio)) {
-                                       merge_rbio(pending, rbio);
-                                       spin_unlock(&cur->bio_list_lock);
-                                       freeit = rbio;
-                                       ret = 1;
-                                       goto out;
-                               }
-                       }
+                       goto lockit;
+               }
 
-                       /* no merging, put us on the tail of the plug list,
-                        * our rbio will be started with the currently
-                        * running rbio unlocks
-                        */
-                       list_add_tail(&rbio->plug_list, &cur->plug_list);
+               /* Can we merge into the lock owner? */
+               if (rbio_can_merge(cur, rbio)) {
+                       merge_rbio(cur, rbio);
                        spin_unlock(&cur->bio_list_lock);
+                       freeit = rbio;
                        ret = 1;
                        goto out;
                }
+
+
+               /*
+                * We couldn't merge with the running rbio, see if we can merge
+                * with the pending ones.  We don't have to check for rmw_locked
+                * because there is no way they are inside finish_rmw right now
+                */
+               list_for_each_entry(pending, &cur->plug_list, plug_list) {
+                       if (rbio_can_merge(pending, rbio)) {
+                               merge_rbio(pending, rbio);
+                               spin_unlock(&cur->bio_list_lock);
+                               freeit = rbio;
+                               ret = 1;
+                               goto out;
+                       }
+               }
+
+               /*
+                * No merging, put us on the tail of the plug list, our rbio
+                * will be started with the currently running rbio unlocks
+                */
+               list_add_tail(&rbio->plug_list, &cur->plug_list);
+               spin_unlock(&cur->bio_list_lock);
+               ret = 1;
+               goto out;
        }
 lockit:
        refcount_inc(&rbio->refs);