diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f150f5c5492b9fe3fdf02e3e5f24dd120fd609fc..4fb1a40e68a08748b94069802c1d8f666bcb7566 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -18,7 +18,6 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
-#include <linux/semaphore.h>
 
 #include "dm.h"
 
@@ -107,8 +106,9 @@ struct dm_snapshot {
        /* The on disk metadata handler */
        struct dm_exception_store *store;
 
-       /* Maximum number of in-flight COW jobs. */
-       struct semaphore cow_count;
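+       /* Number of in-flight COW jobs, throttled at cow_threshold */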
+       unsigned in_progress;
+       struct wait_queue_head in_progress_wait;
 
        struct dm_kcopyd_client *kcopyd_client;
 
@@ -162,8 +162,8 @@ struct dm_snapshot {
  */
 #define DEFAULT_COW_THRESHOLD 2048
 
-static int cow_threshold = DEFAULT_COW_THRESHOLD;
-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
 MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@@ -1327,7 +1327,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_hash_tables;
        }
 
-       sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+       init_waitqueue_head(&s->in_progress_wait);
 
        s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(s->kcopyd_client)) {
@@ -1509,9 +1509,63 @@ static void snapshot_dtr(struct dm_target *ti)
 
        dm_put_device(ti, s->origin);
 
+       WARN_ON(s->in_progress);
+
        kfree(s);
 }
 
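+/* Account for a COW job being handed off to kcopyd. */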
+static void account_start_copy(struct dm_snapshot *s)
+{
+       spin_lock(&s->in_progress_wait.lock);
+       s->in_progress++;
+       spin_unlock(&s->in_progress_wait.lock);
+}
+
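+/* A COW job completed: drop the count and wake up any throttled writers. */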
+static void account_end_copy(struct dm_snapshot *s)
+{
+       spin_lock(&s->in_progress_wait.lock);
+       BUG_ON(!s->in_progress);
+       s->in_progress--;
+       if (likely(s->in_progress <= cow_threshold) &&
+           unlikely(waitqueue_active(&s->in_progress_wait)))
+               wake_up_locked(&s->in_progress_wait);
+       spin_unlock(&s->in_progress_wait.lock);
+}
+
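+/*
+ * Sleep if too many (> cow_threshold) COW jobs are already in flight.
+ * Returns false if this function slept; when unlock_origins is set it
+ * has also dropped _origins_lock, so the caller must restart its lookup.
+ */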
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+       if (unlikely(s->in_progress > cow_threshold)) {
+               spin_lock(&s->in_progress_wait.lock);
+               if (likely(s->in_progress > cow_threshold)) {
+                       /*
+                        * NOTE: this throttle doesn't account for whether
+                        * the caller is servicing an IO that will trigger a COW
+                        * so excess throttling may result for chunks not required
+                        * to be COW'd.  But if cow_threshold was reached, extra
+                        * throttling is unlikely to negatively impact performance.
+                        */
+                       DECLARE_WAITQUEUE(wait, current);
+                       __add_wait_queue(&s->in_progress_wait, &wait);
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&s->in_progress_wait.lock);
+                       if (unlock_origins)
+                               up_read(&_origins_lock);
+                       io_schedule();
+                       remove_wait_queue(&s->in_progress_wait, &wait);
+                       return false;
+               }
+               spin_unlock(&s->in_progress_wait.lock);
+       }
+       return true;
+}
+
 /*
  * Flush a list of buffers.
  */
@@ -1527,7 +1581,7 @@ static void flush_bios(struct bio *bio)
        }
 }
 
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
 
 /*
  * Flush a list of buffers.
@@ -1540,7 +1594,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
-               r = do_origin(s->origin, bio);
+               r = do_origin(s->origin, bio, false);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
@@ -1732,7 +1786,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
                rb_link_node(&pe->out_of_order_node, parent, p);
                rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
        }
-       up(&s->cow_count);
+       account_end_copy(s);
 }
 
 /*
@@ -1756,7 +1810,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
        dest.count = src.count;
 
        /* Hand over to kcopyd */
-       down(&s->cow_count);
+       account_start_copy(s);
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1776,7 +1830,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
 
-       down(&s->cow_count);
+       account_start_copy(s);
        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);
 
@@ -1866,7 +1920,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context)
        struct bio *bio = context;
        struct dm_snapshot *s = bio->bi_private;
 
-       up(&s->cow_count);
+       account_end_copy(s);
        bio->bi_status = write_err ? BLK_STS_IOERR : 0;
        bio_endio(bio);
 }
@@ -1880,7 +1934,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
        dest.sector = bio->bi_iter.bi_sector;
        dest.count = s->store->chunk_size;
 
-       down(&s->cow_count);
+       account_start_copy(s);
        WARN_ON_ONCE(bio->bi_private);
        bio->bi_private = s;
        dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
@@ -1916,6 +1970,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
        if (!s->valid)
                return DM_MAPIO_KILL;
 
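+       /* Throttle writes that may trigger a COW before taking s->lock. */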
+       if (bio_data_dir(bio) == WRITE) {
+               while (unlikely(!wait_for_in_progress(s, false)))
+                       ; /* wait_for_in_progress() has slept */
+       }
+
        down_read(&s->lock);
        dm_exception_table_lock(&lock);
 
@@ -2112,7 +2172,7 @@ redirect_to_origin:
 
        if (bio_data_dir(bio) == WRITE) {
                up_write(&s->lock);
-               return do_origin(s->origin, bio);
+               return do_origin(s->origin, bio, false);
        }
 
 out_unlock:
@@ -2487,15 +2547,28 @@ next_snapshot:
 /*
  * Called on a write from the origin driver.
  */
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
 {
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;
 
+again:
        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
-       if (o)
+       if (o) {
+               if (limit) {
+                       struct dm_snapshot *s;
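+                       /*
+                        * If wait_for_in_progress() slept it also dropped
+                        * _origins_lock, so the lookup must be restarted.
+                        */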
+                       list_for_each_entry(s, &o->snapshots, list)
+                               if (unlikely(!wait_for_in_progress(s, true)))
+                                       goto again;
+               }
+
                r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+       }
        up_read(&_origins_lock);
 
        return r;
@@ -2608,7 +2681,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
                dm_accept_partial_bio(bio, available_sectors);
 
        /* Only tell snapshots if this is a write */
-       return do_origin(o->dev, bio);
+       return do_origin(o->dev, bio, true);
 }
 
 /*