dm clone: replace spin_lock_irqsave with spin_lock_irq
Author:     Mikulas Patocka <mpatocka@redhat.com>
AuthorDate: Fri, 4 Oct 2019 14:17:37 +0000 (10:17 -0400)
Commit:     Mike Snitzer <snitzer@redhat.com>
CommitDate: Tue, 5 Nov 2019 19:35:42 +0000 (14:35 -0500)
In code paths where interrupts are known to be enabled,
spin_lock_irq()/spin_unlock_irq() should be used instead of
spin_lock_irqsave()/spin_unlock_irqrestore().

spin_lock_irq() and spin_unlock_irq() are faster because they don't need to
save and restore the interrupt flags.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-clone-metadata.c
drivers/md/dm-clone-metadata.h
drivers/md/dm-clone-target.c
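
A minimal sketch (not part of the patch) contrasting the two APIs; the
identifiers example_lock, example_count and the two helpers are invented for
illustration. spin_lock_irqsave() records the current interrupt state and
spin_unlock_irqrestore() puts it back, so that pair is safe in any context;
spin_lock_irq()/spin_unlock_irq() skip the save/restore, which is why they are
cheaper, but spin_unlock_irq() re-enables interrupts unconditionally, so that
pair is only correct where interrupts are known to be enabled, as in the
dm-clone call sites converted below.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_count;

/* Safe in any context: the caller's IRQ state is saved and restored. */
static void example_inc_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_count++;
        spin_unlock_irqrestore(&example_lock, flags);
}

/*
 * Only valid where interrupts are known to be enabled (e.g. plain process
 * context): the lock side disables IRQs unconditionally and the unlock side
 * re-enables them unconditionally.
 */
static void example_inc_process_context(void)
{
        spin_lock_irq(&example_lock);
        example_count++;
        spin_unlock_irq(&example_lock);
}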

diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 6bc8c1d1c351c8441769739b84d6d89a701a982c..08c552e5e41b2f6c96f0142db37df659ab96fa9f 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -712,7 +712,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd)
 static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
 {
        int r;
-       unsigned long word, flags;
+       unsigned long word;
 
        word = 0;
        do {
@@ -736,9 +736,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
                return r;
 
        /* Update the changed flag */
-       spin_lock_irqsave(&cmd->bitmap_lock, flags);
+       spin_lock_irq(&cmd->bitmap_lock);
        dmap->changed = 0;
-       spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+       spin_unlock_irq(&cmd->bitmap_lock);
 
        return 0;
 }
@@ -746,7 +746,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
 int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
 {
        int r = -EPERM;
-       unsigned long flags;
        struct dirty_map *dmap, *next_dmap;
 
        down_write(&cmd->lock);
@@ -770,9 +769,9 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
        }
 
        /* Swap dirty bitmaps */
-       spin_lock_irqsave(&cmd->bitmap_lock, flags);
+       spin_lock_irq(&cmd->bitmap_lock);
        cmd->current_dmap = next_dmap;
-       spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+       spin_unlock_irq(&cmd->bitmap_lock);
 
        /*
         * No one is accessing the old dirty bitmap anymore, so we can flush
@@ -817,9 +816,9 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
 {
        int r = 0;
        struct dirty_map *dmap;
-       unsigned long word, region_nr, flags;
+       unsigned long word, region_nr;
 
-       spin_lock_irqsave(&cmd->bitmap_lock, flags);
+       spin_lock_irq(&cmd->bitmap_lock);
 
        if (cmd->read_only) {
                r = -EPERM;
@@ -836,7 +835,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
                }
        }
 out:
-       spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+       spin_unlock_irq(&cmd->bitmap_lock);
 
        return r;
 }
@@ -903,13 +902,11 @@ out:
 
 void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
 {
-       unsigned long flags;
-
        down_write(&cmd->lock);
 
-       spin_lock_irqsave(&cmd->bitmap_lock, flags);
+       spin_lock_irq(&cmd->bitmap_lock);
        cmd->read_only = 1;
-       spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+       spin_unlock_irq(&cmd->bitmap_lock);
 
        if (!cmd->fail_io)
                dm_bm_set_read_only(cmd->bm);
@@ -919,13 +916,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
 
 void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
 {
-       unsigned long flags;
-
        down_write(&cmd->lock);
 
-       spin_lock_irqsave(&cmd->bitmap_lock, flags);
+       spin_lock_irq(&cmd->bitmap_lock);
        cmd->read_only = 0;
-       spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+       spin_unlock_irq(&cmd->bitmap_lock);
 
        if (!cmd->fail_io)
                dm_bm_set_read_write(cmd->bm);
diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
index 434bff08508b53b7c194c63e0b5f9efc687ae134..3fe50a781c1161a1050707331fa2c28169a8a917 100644
--- a/drivers/md/dm-clone-metadata.h
+++ b/drivers/md/dm-clone-metadata.h
@@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
  * @start: Starting region number
  * @nr_regions: Number of regions in the range
  *
- * This function doesn't block, so it's safe to call it from interrupt context.
+ * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq()
+ * it's NOT safe to call it from any context where interrupts are disabled, e.g.,
+ * from interrupt context.
  */
 int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
                            unsigned long nr_regions);
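
The comment change above is the user-visible side of the conversion: because
spin_unlock_irq() re-enables interrupts unconditionally,
dm_clone_cond_set_range() may no longer be called with interrupts disabled.
A hypothetical mis-use, for illustration only (caller_lock and broken_caller()
are invented; the dm_clone_cond_set_range() prototype is the one declared
above):

#include <linux/spinlock.h>

#include "dm-clone-metadata.h"

static DEFINE_SPINLOCK(caller_lock);    /* invented lock, for illustration */

static void broken_caller(struct dm_clone_metadata *cmd,
                          unsigned long start, unsigned long nr_regions)
{
        unsigned long flags;

        spin_lock_irqsave(&caller_lock, flags);  /* interrupts disabled */

        /*
         * Internally this now ends with spin_unlock_irq(&cmd->bitmap_lock),
         * which re-enables interrupts unconditionally: the rest of this
         * critical section would run with IRQs on, defeating the caller's
         * irqsave/irqrestore pairing.
         */
        dm_clone_cond_set_range(cmd, start, nr_regions);

        spin_unlock_irqrestore(&caller_lock, flags);
}
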
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 4ca8f19772224a9bc3c3cdc76b1fbe2a7774d20f..f7d1070a4d401dd789799905f834b45901ce69a6 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -332,8 +332,6 @@ static void submit_bios(struct bio_list *bios)
  */
 static void issue_bio(struct clone *clone, struct bio *bio)
 {
-       unsigned long flags;
-
        if (!bio_triggers_commit(clone, bio)) {
                generic_make_request(bio);
                return;
@@ -352,9 +350,9 @@ static void issue_bio(struct clone *clone, struct bio *bio)
         * Batch together any bios that trigger commits and then issue a single
         * commit for them in process_deferred_flush_bios().
         */
-       spin_lock_irqsave(&clone->lock, flags);
+       spin_lock_irq(&clone->lock);
        bio_list_add(&clone->deferred_flush_bios, bio);
-       spin_unlock_irqrestore(&clone->lock, flags);
+       spin_unlock_irq(&clone->lock);
 
        wake_worker(clone);
 }
@@ -469,7 +467,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
 
 static void process_discard_bio(struct clone *clone, struct bio *bio)
 {
-       unsigned long rs, re, flags;
+       unsigned long rs, re;
 
        bio_region_range(clone, bio, &rs, &re);
        BUG_ON(re > clone->nr_regions);
@@ -501,9 +499,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
        /*
         * Defer discard processing.
         */
-       spin_lock_irqsave(&clone->lock, flags);
+       spin_lock_irq(&clone->lock);
        bio_list_add(&clone->deferred_discard_bios, bio);
-       spin_unlock_irqrestore(&clone->lock, flags);
+       spin_unlock_irq(&clone->lock);
 
        wake_worker(clone);
 }
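
The remaining hunks convert the deferred-processing paths
(process_deferred_discards(), process_deferred_bios(),
process_deferred_flush_bios()). These run from dm-clone's worker, which the
wake_worker() calls above queue, i.e. process context with interrupts enabled,
and that is what makes the _irq form sufficient. A generic, hypothetical
sketch of that pattern (the example_* names are invented; only the bio_list
and workqueue APIs are real):

#include <linux/bio.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/*
 * Hypothetical worker state, mirroring the "drain a deferred bio list under
 * the lock" pattern used in the hunks below.
 */
struct example_worker_state {
        spinlock_t lock;
        struct work_struct work;
        struct bio_list deferred;       /* fed from the submission path */
};

/* Runs via queue_work(): process context, interrupts enabled. */
static void example_do_work(struct work_struct *ws)
{
        struct example_worker_state *s =
                container_of(ws, struct example_worker_state, work);
        struct bio_list bios = BIO_EMPTY_LIST;

        spin_lock_irq(&s->lock);                /* IRQs known to be on here */
        bio_list_merge(&bios, &s->deferred);
        bio_list_init(&s->deferred);
        spin_unlock_irq(&s->lock);

        /* ... issue or complete the drained bios ... */
}
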
@@ -1140,13 +1138,13 @@ static void process_deferred_discards(struct clone *clone)
        int r = -EPERM;
        struct bio *bio;
        struct blk_plug plug;
-       unsigned long rs, re, flags;
+       unsigned long rs, re;
        struct bio_list discards = BIO_EMPTY_LIST;
 
-       spin_lock_irqsave(&clone->lock, flags);
+       spin_lock_irq(&clone->lock);
        bio_list_merge(&discards, &clone->deferred_discard_bios);
        bio_list_init(&clone->deferred_discard_bios);
-       spin_unlock_irqrestore(&clone->lock, flags);
+       spin_unlock_irq(&clone->lock);
 
        if (bio_list_empty(&discards))
                return;
@@ -1176,13 +1174,12 @@ out:
 
 static void process_deferred_bios(struct clone *clone)
 {
-       unsigned long flags;
        struct bio_list bios = BIO_EMPTY_LIST;
 
-       spin_lock_irqsave(&clone->lock, flags);
+       spin_lock_irq(&clone->lock);
        bio_list_merge(&bios, &clone->deferred_bios);
        bio_list_init(&clone->deferred_bios);
-       spin_unlock_irqrestore(&clone->lock, flags);
+       spin_unlock_irq(&clone->lock);
 
        if (bio_list_empty(&bios))
                return;
@@ -1193,7 +1190,6 @@ static void process_deferred_bios(struct clone *clone)
 static void process_deferred_flush_bios(struct clone *clone)
 {
        struct bio *bio;
-       unsigned long flags;
        struct bio_list bios = BIO_EMPTY_LIST;
        struct bio_list bio_completions = BIO_EMPTY_LIST;
 
@@ -1201,13 +1197,13 @@ static void process_deferred_flush_bios(struct clone *clone)
         * If there are any deferred flush bios, we must commit the metadata
         * before issuing them or signaling their completion.
         */
-       spin_lock_irqsave(&clone->lock, flags);
+       spin_lock_irq(&clone->lock);
        bio_list_merge(&bios, &clone->deferred_flush_bios);
        bio_list_init(&clone->deferred_flush_bios);
 
        bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
        bio_list_init(&clone->deferred_flush_completions);
-       spin_unlock_irqrestore(&clone->lock, flags);
+       spin_unlock_irq(&clone->lock);
 
        if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
            !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))