Merge branch 'for-linus' of git://git.kernel.dk/linux-block
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Dec 2016 18:23:39 +0000 (10:23 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Dec 2016 18:23:39 +0000 (10:23 -0800)
Pull block layer fixes from Jens Axboe:
 "Just a set of small fixes that have either been queued up after the
  original pull for this merge window, or just missed the original pull
  request.

   - a few bcache fixes/changes from Eric and Kent

   - add WRITE_SAME to the command filter whitelist from Mauricio

   - kill an unused struct member from Ritesh

   - partition IO alignment fix from Stefan

   - nvme sysfs printf fix from Stephen"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: check partition alignment
  nvme : Use correct scnprintf in cmb show
  block: allow WRITE_SAME commands with the SG_IO ioctl
  block: Remove unused member (busy) from struct blk_queue_tag
  bcache: partition support: add 16 minors per bcacheN device
  bcache: Make gc wakeup sane, remove set_task_state()

block/ioctl.c
block/scsi_ioctl.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/nvme/host/pci.c
include/linux/blkdev.h

index f856963204f4949f5197c1cb0c0a247ca12c95da..656c8c6ed206f876bbdbd0f240dedf9997a88cf0 100644 (file)
@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
                                    || pstart < 0 || plength < 0 || partno > 65535)
                                        return -EINVAL;
                        }
+                       /* check if partition is aligned to blocksize */
+                       if (p.start & (bdev_logical_block_size(bdev) - 1))
+                               return -EINVAL;
 
                        mutex_lock(&bdev->bd_mutex);
 
index 0774799942e06a8d890a5c88e40990cd53a15037..c6fee7437be44573ade684d064e161954485301a 100644 (file)
@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
        __set_bit(WRITE_16, filter->write_ok);
        __set_bit(WRITE_LONG, filter->write_ok);
        __set_bit(WRITE_LONG_2, filter->write_ok);
+       __set_bit(WRITE_SAME, filter->write_ok);
+       __set_bit(WRITE_SAME_16, filter->write_ok);
+       __set_bit(WRITE_SAME_32, filter->write_ok);
        __set_bit(ERASE, filter->write_ok);
        __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
        __set_bit(MODE_SELECT, filter->write_ok);
index 6b420a55c7459f1af42b9dd9c920dce63bd7d089..c3ea03c9a1a8ef603a25934ccbe32ea4bfca3d66 100644 (file)
@@ -425,7 +425,7 @@ struct cache {
         * until a gc finishes - otherwise we could pointlessly burn a ton of
         * cpu
         */
-       unsigned                invalidate_needs_gc:1;
+       unsigned                invalidate_needs_gc;
 
        bool                    discard; /* Get rid of? */
 
@@ -593,8 +593,8 @@ struct cache_set {
 
        /* Counts how many sectors bio_insert has added to the cache */
        atomic_t                sectors_to_gc;
+       wait_queue_head_t       gc_wait;
 
-       wait_queue_head_t       moving_gc_wait;
        struct keybuf           moving_gc_keys;
        /* Number of moving GC bios in flight */
        struct semaphore        moving_in_flight;
index 6fdd8e252760cbc11ff8cceb1c38fb85eccbcbad..a43eedd5804dd8a9c13b60d96c5bca59fd355b81 100644 (file)
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
        bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-       struct cache_set *c = arg;
        struct cache *ca;
        unsigned i;
 
-       while (1) {
-again:
-               bch_btree_gc(c);
+       for_each_cache(ca, c, i)
+               if (ca->invalidate_needs_gc)
+                       return true;
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (kthread_should_stop())
-                       break;
+       if (atomic_read(&c->sectors_to_gc) < 0)
+               return true;
 
-               mutex_lock(&c->bucket_lock);
+       return false;
+}
 
-               for_each_cache(ca, c, i)
-                       if (ca->invalidate_needs_gc) {
-                               mutex_unlock(&c->bucket_lock);
-                               set_current_state(TASK_RUNNING);
-                               goto again;
-                       }
+static int bch_gc_thread(void *arg)
+{
+       struct cache_set *c = arg;
 
-               mutex_unlock(&c->bucket_lock);
+       while (1) {
+               wait_event_interruptible(c->gc_wait,
+                          kthread_should_stop() || gc_should_run(c));
 
-               schedule();
+               if (kthread_should_stop())
+                       break;
+
+               set_gc_sectors(c);
+               bch_btree_gc(c);
        }
 
        return 0;
@@ -1790,11 +1792,10 @@ again:
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-       c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+       c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
        if (IS_ERR(c->gc_thread))
                return PTR_ERR(c->gc_thread);
 
-       set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
        return 0;
 }
 
index 5c391fa01bedbfba3f1dea062605460ccadc1c6a..9b80417cd547f52c264c1b4b993f3ee2155405f2 100644 (file)
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-       if (c->gc_thread)
-               wake_up_process(c->gc_thread);
+       wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE       0
index f49c5417527dcbb8e0a839a32ec5d354323d63a4..76d20875503c17c6f5956d7f7bfde4eaa90ffbcd 100644 (file)
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;
 
-       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-               set_gc_sectors(op->c);
+       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);
-       }
 
        if (op->bypass)
                return bch_data_invalidate(cl);
index 2fb5bfeb43e2e1668051667d6582a087b6bb14ad..3a19cbc8b230e5a7b77cafa89427b8e282c251dc 100644 (file)
@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
+#define BCACHE_MINORS          16 /* partition support */
 
 /* Superblock */
 
@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
        if (minor < 0)
                return minor;
 
+       minor *= BCACHE_MINORS;
+
        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(d->disk = alloc_disk(1))) {
+           !(d->disk = alloc_disk(BCACHE_MINORS))) {
                ida_simple_remove(&bcache_minor, minor);
                return -ENOMEM;
        }
@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
        init_waitqueue_head(&c->bucket_wait);
+       init_waitqueue_head(&c->gc_wait);
        sema_init(&c->uuid_write_mutex, 1);
 
        spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
 
        for_each_cache(ca, c, i)
                c->nbuckets += ca->sb.nbuckets;
+       set_gc_sectors(c);
 
        if (CACHE_SYNC(&c->sb)) {
                LIST_HEAD(journal);
index 2fd7dc2e8fc4b742f505b7c54961f1be4dc927de..3d21a154dce79deceeff77cd16ef5c6bf2a71978 100644 (file)
@@ -50,7 +50,7 @@
 #define NVME_AQ_DEPTH          256
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
-               
+
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -1349,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
 {
        struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
 
-       return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+       return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
                       ndev->cmbloc, ndev->cmbsz);
 }
 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
index 286b2a2643833615633e82d7d9e667b66abbfaef..83695641bd5ec272551857c448cc9b4f354898b8 100644 (file)
@@ -288,7 +288,6 @@ enum blk_queue_state {
 struct blk_queue_tag {
        struct request **tag_index;     /* map of busy tags */
        unsigned long *tag_map;         /* bit map of free/busy tags */
-       int busy;                       /* current depth */
        int max_depth;                  /* what we will send to device */
        int real_max_depth;             /* what the array can hold */
        atomic_t refcnt;                /* map can be shared */