btrfs: scrub: use bool for flush_all_writes
author David Sterba <dsterba@suse.com>
Fri, 31 Mar 2017 15:12:51 +0000 (17:12 +0200)
committer David Sterba <dsterba@suse.com>
Mon, 21 Aug 2017 15:47:42 +0000 (17:47 +0200)
flush_all_writes is an atomic but does not use the atomic semantics at all,
it's just an on/off indicator, so we can use bool.

Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/scrub.c

index 20c1ba19e66564887b97886d30411a5eafc27ec2..810fce2404898c32e102cc5c5b29b6d81c71f942 100644 (file)
@@ -182,8 +182,8 @@ struct scrub_ctx {
        struct scrub_bio        *wr_curr_bio;
        struct mutex            wr_lock;
        int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
-       atomic_t                flush_all_writes;
        struct btrfs_device     *wr_tgtdev;
+       bool                    flush_all_writes;
 
        /*
         * statistics
@@ -717,7 +717,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
                WARN_ON(!fs_info->dev_replace.tgtdev);
                sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
                sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
-               atomic_set(&sctx->flush_all_writes, 0);
+               sctx->flush_all_writes = false;
        }
 
        return sctx;
@@ -2402,8 +2402,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
 
        scrub_block_put(sblock);
 
-       if (sctx->is_dev_replace &&
-           atomic_read(&sctx->flush_all_writes)) {
+       if (sctx->is_dev_replace && sctx->flush_all_writes) {
                mutex_lock(&sctx->wr_lock);
                scrub_wr_submit(sctx);
                mutex_unlock(&sctx->wr_lock);
@@ -2607,8 +2606,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
        sctx->first_free = sbio->index;
        spin_unlock(&sctx->list_lock);
 
-       if (sctx->is_dev_replace &&
-           atomic_read(&sctx->flush_all_writes)) {
+       if (sctx->is_dev_replace && sctx->flush_all_writes) {
                mutex_lock(&sctx->wr_lock);
                scrub_wr_submit(sctx);
                mutex_unlock(&sctx->wr_lock);
@@ -3440,14 +3438,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
                 */
                if (atomic_read(&fs_info->scrub_pause_req)) {
                        /* push queued extents */
-                       atomic_set(&sctx->flush_all_writes, 1);
+                       sctx->flush_all_writes = true;
                        scrub_submit(sctx);
                        mutex_lock(&sctx->wr_lock);
                        scrub_wr_submit(sctx);
                        mutex_unlock(&sctx->wr_lock);
                        wait_event(sctx->list_wait,
                                   atomic_read(&sctx->bios_in_flight) == 0);
-                       atomic_set(&sctx->flush_all_writes, 0);
+                       sctx->flush_all_writes = false;
                        scrub_blocked_if_needed(fs_info);
                }
 
@@ -3892,7 +3890,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 * write requests are really completed when bios_in_flight
                 * changes to 0.
                 */
-               atomic_set(&sctx->flush_all_writes, 1);
+               sctx->flush_all_writes = true;
                scrub_submit(sctx);
                mutex_lock(&sctx->wr_lock);
                scrub_wr_submit(sctx);
@@ -3910,7 +3908,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 */
                wait_event(sctx->list_wait,
                           atomic_read(&sctx->workers_pending) == 0);
-               atomic_set(&sctx->flush_all_writes, 0);
+               sctx->flush_all_writes = false;
 
                scrub_pause_off(fs_info);