Merge branch 'for-4.10/block' into for-4.10/merge for-4.10/merge
authorJens Axboe <axboe@fb.com>
Mon, 12 Dec 2016 01:31:01 +0000 (18:31 -0700)
committerJens Axboe <axboe@fb.com>
Mon, 12 Dec 2016 01:31:01 +0000 (18:31 -0700)
Signed-off-by: Jens Axboe <axboe@fb.com>
28 files changed:
1  2 
MAINTAINERS
block/blk-flush.c
block/blk-map.c
drivers/md/dm-raid1.c
drivers/md/dm-rq.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid5-cache.c
drivers/mmc/card/block.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/scsi.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/rdma.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/st.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/cifs/connect.c
fs/ext4/super.c
fs/f2fs/gc.c
fs/splice.c
include/linux/fs.h
include/linux/nvme.h
include/uapi/linux/Kbuild
lib/iov_iter.c

diff --cc MAINTAINERS
Simple merge
index 3c882cbc75417d60bfa92c20f5da27aaaa86c4bf,27a42dab5a36644770f428a003dd2cfe75e7ad7d..20b7c7a02f1cbdfe5a63c5918d3abd356bd4265b
@@@ -342,34 -340,6 +340,34 @@@ static void flush_data_end_io(struct re
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
  
-       rq->cmd_flags &= ~REQ_STARTED;
 +      /*
 +       * Updating q->in_flight[] here for making this tag usable
 +       * early. Because in blk_queue_start_tag(),
 +       * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
 +       * reserve tags for sync I/O.
 +       *
 +       * More importantly this way can avoid the following I/O
 +       * deadlock:
 +       *
 +       * - suppose there are 40 fua requests coming to flush queue
 +       *   and queue depth is 31
 +       * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
 +       *   tag for async I/O any more
 +       * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
 +       *   and flush_data_end_io() is called
 +       * - the other rqs still can't go ahead if not updating
 +       *   q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
 +       *   are held in flush data queue and make no progress of
 +       *   handling post flush rq
 +       * - only after the post flush rq is handled, all these rqs
 +       *   can be completed
 +       */
 +
 +      elv_completed_request(q, rq);
 +
 +      /* for avoiding double accounting */
++      rq->rq_flags &= ~RQF_STARTED;
 +
        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
diff --cc block/blk-map.c
Simple merge
Simple merge
Simple merge
diff --cc drivers/md/dm.c
Simple merge
diff --cc drivers/md/md.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc fs/ext4/super.c
Simple merge
diff --cc fs/f2fs/gc.c
Simple merge
diff --cc fs/splice.c
Simple merge
Simple merge
index fc3c2420659395039be1288a82e8d62d98aefab3,5ac1f57226f426992f43cb4ddafc7ffb0835d4ad..3d1c6f1b15c9bd351fa04d4e9824ff83b5c0045a
@@@ -970,9 -961,21 +990,22 @@@ enum 
        NVME_SC_REFTAG_CHECK            = 0x284,
        NVME_SC_COMPARE_FAILED          = 0x285,
        NVME_SC_ACCESS_DENIED           = 0x286,
 +      NVME_SC_UNWRITTEN_BLOCK         = 0x287,
  
        NVME_SC_DNR                     = 0x4000,
+       /*
+        * FC Transport-specific error status values for NVME commands
+        *
+        * Transport-specific status code values must be in the range 0xB0..0xBF
+        */
+       /* Generic FC failure - catchall */
+       NVME_SC_FC_TRANSPORT_ERROR      = 0x00B0,
+       /* I/O failure due to FC ABTS'd */
+       NVME_SC_FC_TRANSPORT_ABORTED    = 0x00B1,
  };
  
  struct nvme_completion {
Simple merge
diff --cc lib/iov_iter.c
Simple merge