Merge branch 'for-4.7/core' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 17 May 2016 22:29:49 +0000 (15:29 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 17 May 2016 22:29:49 +0000 (15:29 -0700)
Pull core block layer updates from Jens Axboe:
 "This is the core block IO changes for this merge window.  Nothing
  earth shattering in here, it's mostly just fixes.  In detail:

   - Fix for a long-standing issue where wrong ordering in blk-mq caused
     order_to_size() to spew a warning.  From Bart.

   - Async discard support from Christoph.  Basically just splitting our
     sync interface into a submit + wait part.

   - Add a cleaner interface for flagging whether a device has a write
     back cache or not.  We've previously overloaded blk_queue_flush()
     with this, but let's make it more explicit.  Drivers cleaned up and
     updated in the drivers pull request.  From me.

   - Fix for a double check for whether IO accounting is enabled or not.
     From Michael Callahan.

   - Fix for the async discard from Mike Snitzer, reinstating the early
     EOPNOTSUPP return if the device doesn't support discards.

   - Also from Mike, export bio_inc_remaining() so dm can drop its
     private copy of it.

   - From Ming Lin, add support for passing in an offset for request
     payloads.

   - Tag function export from Sagi, which will be used in NVMe in the
     drivers pull.

   - Two blktrace related fixes from Shaohua.

   - Propagate NOMERGE flag when making a request from a bio, also from
     Shaohua.

   - An optimization to not parse cgroup paths in blk-throttle, if we
     don't need to.  From Shaohua"

* 'for-4.7/core' of git://git.kernel.dk/linux-block:
  blk-mq: fix undefined behaviour in order_to_size()
  blk-throttle: don't parse cgroup path if trace isn't enabled
  blktrace: add missed mask name
  blktrace: delete garbage for message trace
  block: make bio_inc_remaining() interface accessible again
  block: reinstate early return of -EOPNOTSUPP from blkdev_issue_discard
  block: Minor blk_account_io_start usage cleanup
  block: add __blkdev_issue_discard
  block: remove struct bio_batch
  block: copy NOMERGE flag from bio to request
  block: add ability to flag write back caching on a device
  blk-mq: Export tagset iter function
  block: add offset in blk_add_request_payload()
  writeback: Fix performance regression in wb_over_bg_thresh()

17 files changed:
Documentation/block/queue-sysfs.txt
block/bio.c
block/blk-core.c
block/blk-lib.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-throttle.c
drivers/block/skd_main.c
drivers/scsi/sd.c
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/blktrace_api.h
kernel/trace/blktrace.c

index e5d914845be6de4e59d59b7c1e905392d6bc5fb5..dce25d848d92c67074ebd26ffaa526217f8f3957 100644 (file)
@@ -141,6 +141,15 @@ control of this block device to that new IO scheduler. Note that writing
 an IO scheduler name to this file will attempt to load that IO scheduler
 module, if it isn't already present in the system.
 
+write_cache (RW)
+----------------
+When read, this file will display whether the device has write back
+caching enabled or not. It will return "write back" for the former
+case, and "write through" for the latter. Writing to this file can
+change the kernel's view of the device, but it doesn't alter the
+device state. This means that it might not be safe to toggle the
+setting from "write back" to "write through", since that will also
+eliminate cache flushes issued by the kernel.
 
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
index 807d25e466ec2ab7ae8c319e62a5621dc4fc29a7..0e4aa42bc30dc7919a58b5c93b4470d43cf01f85 100644 (file)
@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
        bio_endio(__bio_chain_endio(bio));
 }
 
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-       bio_set_flag(bio, BIO_CHAIN);
-       smp_mb__before_atomic();
-       atomic_inc(&bio->__bi_remaining);
-}
-
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
index b60537b2c35b4152343c0239374d8ba332865b09..c50227796a26cd29a892907a20c6aa5438775966 100644 (file)
@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_add_request_payload - add a payload to a request
  * @rq: request to update
  * @page: page backing the payload
+ * @offset: offset in page
  * @len: length of the payload.
  *
  * This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
  * discard requests should ever use it.
  */
 void blk_add_request_payload(struct request *rq, struct page *page,
-               unsigned int len)
+               int offset, unsigned int len)
 {
        struct bio *bio = rq->bio;
 
        bio->bi_io_vec->bv_page = page;
-       bio->bi_io_vec->bv_offset = 0;
+       bio->bi_io_vec->bv_offset = offset;
        bio->bi_io_vec->bv_len = len;
 
        bio->bi_iter.bi_size = len;
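
The new offset argument lets a driver point the single-segment payload at a sub-range of a page rather than always at byte 0.  A minimal sketch under that assumption (the helper name, the 8-byte header and the 16-byte descriptor are illustrative, not taken from any in-tree driver):

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <asm/unaligned.h>

static void my_prep_discard_payload(struct request *rq, struct page *page,
				    sector_t lba, unsigned int nr_blocks)
{
	unsigned int offset = 8;		/* skip a hypothetical header */
	char *buf = page_address(page);

	/* build a simple LBA/length descriptor inside the shared page */
	put_unaligned_be64(lba, &buf[offset]);
	put_unaligned_be32(nr_blocks, &buf[offset + 8]);

	/* the request payload now starts at @offset within @page, not at 0 */
	blk_add_request_payload(rq, page, offset, 16);
}

The two in-tree callers updated below (skd and sd) keep passing 0, so their behaviour is unchanged.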
index 9ebf65379556a0f5730b3934acd2fa78e232e816..23d7f301a1967483ec79a383a1a317881caf3358 100644 (file)
@@ -9,82 +9,46 @@
 
 #include "blk.h"
 
-struct bio_batch {
-       atomic_t                done;
-       int                     error;
-       struct completion       *wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+               gfp_t gfp)
 {
-       struct bio_batch *bb = bio->bi_private;
+       struct bio *new = bio_alloc(gfp, nr_pages);
+
+       if (bio) {
+               bio_chain(bio, new);
+               submit_bio(rw, bio);
+       }
 
-       if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-               bb->error = bio->bi_error;
-       if (atomic_dec_and_test(&bb->done))
-               complete(bb->wait);
-       bio_put(bio);
+       return new;
 }
 
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:      blockdev to issue discard for
- * @sector:    start sector
- * @nr_sects:  number of sectors to discard
- * @gfp_mask:  memory allocation flags (for bio_alloc)
- * @flags:     BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
 {
-       DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
-       int type = REQ_WRITE | REQ_DISCARD;
+       struct bio *bio = *biop;
        unsigned int granularity;
        int alignment;
-       struct bio_batch bb;
-       struct bio *bio;
-       int ret = 0;
-       struct blk_plug plug;
 
        if (!q)
                return -ENXIO;
-
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
+       if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+               return -EOPNOTSUPP;
 
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
-       if (flags & BLKDEV_DISCARD_SECURE) {
-               if (!blk_queue_secdiscard(q))
-                       return -EOPNOTSUPP;
-               type |= REQ_SECURE;
-       }
-
-       atomic_set(&bb.done, 1);
-       bb.error = 0;
-       bb.wait = &wait;
-
-       blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;
 
-               bio = bio_alloc(gfp_mask, 1);
-               if (!bio) {
-                       ret = -ENOMEM;
-                       break;
-               }
-
                /* Make sure bi_size doesn't overflow */
                req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
 
-               /*
+               /**
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
@@ -98,18 +62,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        req_sects = end_sect - sector;
                }
 
+               bio = next_bio(bio, type, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
-               bio->bi_private = &bb;
 
                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;
 
-               atomic_inc(&bb.done);
-               submit_bio(type, bio);
-
                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
@@ -118,14 +78,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 */
                cond_resched();
        }
-       blk_finish_plug(&plug);
 
-       /* Wait for bios in-flight */
-       if (!atomic_dec_and_test(&bb.done))
-               wait_for_completion_io(&wait);
+       *biop = bio;
+       return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:      blockdev to issue discard for
+ * @sector:    start sector
+ * @nr_sects:  number of sectors to discard
+ * @gfp_mask:  memory allocation flags (for bio_alloc)
+ * @flags:     BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+       int type = REQ_WRITE | REQ_DISCARD;
+       struct bio *bio = NULL;
+       struct blk_plug plug;
+       int ret;
+
+       if (flags & BLKDEV_DISCARD_SECURE)
+               type |= REQ_SECURE;
+
+       blk_start_plug(&plug);
+       ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+                       &bio);
+       if (!ret && bio) {
+               ret = submit_bio_wait(type, bio);
+               if (ret == -EOPNOTSUPP)
+                       ret = 0;
+       }
+       blk_finish_plug(&plug);
 
-       if (bb.error)
-               return bb.error;
        return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
 {
-       DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
-       struct bio_batch bb;
-       struct bio *bio;
+       struct bio *bio = NULL;
        int ret = 0;
 
        if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;
 
-       atomic_set(&bb.done, 1);
-       bb.error = 0;
-       bb.wait = &wait;
-
        while (nr_sects) {
-               bio = bio_alloc(gfp_mask, 1);
-               if (!bio) {
-                       ret = -ENOMEM;
-                       break;
-               }
-
+               bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
-               bio->bi_private = &bb;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
-
-               atomic_inc(&bb.done);
-               submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
        }
 
-       /* Wait for bios in-flight */
-       if (!atomic_dec_and_test(&bb.done))
-               wait_for_completion_io(&wait);
-
-       if (bb.error)
-               return bb.error;
-       return ret;
+       if (bio)
+               ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+       return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
 
@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                                  sector_t nr_sects, gfp_t gfp_mask)
 {
        int ret;
-       struct bio *bio;
-       struct bio_batch bb;
+       struct bio *bio = NULL;
        unsigned int sz;
-       DECLARE_COMPLETION_ONSTACK(wait);
-
-       atomic_set(&bb.done, 1);
-       bb.error = 0;
-       bb.wait = &wait;
 
-       ret = 0;
        while (nr_sects != 0) {
-               bio = bio_alloc(gfp_mask,
-                               min(nr_sects, (sector_t)BIO_MAX_PAGES));
-               if (!bio) {
-                       ret = -ENOMEM;
-                       break;
-               }
-
+               bio = next_bio(bio, WRITE,
+                               min(nr_sects, (sector_t)BIO_MAX_PAGES),
+                               gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev   = bdev;
-               bio->bi_end_io = bio_batch_end_io;
-               bio->bi_private = &bb;
 
                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        if (ret < (sz << 9))
                                break;
                }
-               ret = 0;
-               atomic_inc(&bb.done);
-               submit_bio(WRITE, bio);
        }
 
-       /* Wait for bios in-flight */
-       if (!atomic_dec_and_test(&bb.done))
-               wait_for_completion_io(&wait);
-
-       if (bb.error)
-               return bb.error;
-       return ret;
+       if (bio)
+               return submit_bio_wait(WRITE, bio);
+       return 0;
 }
 
 /**
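
Splitting blkdev_issue_discard() into __blkdev_issue_discard() plus a final submit makes it possible to build the discard chain without blocking and attach a custom completion instead of waiting synchronously.  A minimal sketch of such a caller, assuming fire-and-forget style completion handling (struct my_ctx, my_discard_done() and my_issue_discard_async() are illustrative names, not part of this series):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

struct my_ctx {
	struct completion done;
	int error;
};

static void my_discard_done(struct bio *bio)
{
	struct my_ctx *ctx = bio->bi_private;

	ctx->error = bio->bi_error;
	complete(&ctx->done);
	bio_put(bio);
}

static int my_issue_discard_async(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, struct my_ctx *ctx)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	int ret;

	init_completion(&ctx->done);

	/* builds the chain; all but the last bio are already submitted */
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				     type, &bio);
	if (ret || !bio)
		return ret;

	bio->bi_end_io = my_discard_done;
	bio->bi_private = ctx;
	submit_bio(type, bio);	/* my_discard_done() runs on completion */
	return 0;
}

The caller can then go on doing other work and only wait_for_completion(&ctx->done) (checking ctx->error) when it actually needs the discard to have finished; the synchronous blkdev_issue_discard() above is exactly this pattern with submit_bio_wait().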
index abdbb47405cb840c7005618cdece6b3165850ecc..2fd04286f103520a8fd9b370b4dba1633be810ef 100644 (file)
@@ -474,6 +474,18 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+               busy_tag_iter_fn *fn, void *priv)
+{
+       int i;
+
+       for (i = 0; i < tagset->nr_hw_queues; i++) {
+               if (tagset->tags && tagset->tags[i])
+                       blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+       }
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
 {
index 1699baf39b78a8379c3cd5559b324d71da41f100..7df9c9263b2125a86bceb05c1d357ded38051c70 100644 (file)
@@ -1122,8 +1122,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
        init_request_from_bio(rq, bio);
 
-       if (blk_do_io_stat(rq))
-               blk_account_io_start(rq, 1);
+       blk_account_io_start(rq, 1);
 }
 
 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1496,7 +1495,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                int to_do;
                void *p;
 
-               while (left < order_to_size(this_order - 1) && this_order)
+               while (this_order && left < order_to_size(this_order - 1))
                        this_order--;
 
                do {
index 331e4eee0dda0c29cc673b63c7e7341ae45e5859..c903bee43cf85221a5a007d6c3b60b8b0aa81b23 100644 (file)
@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:         the request queue for the device
+ * @wc:                write back cache on or off
+ * @fua:       device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+       spin_lock_irq(q->queue_lock);
+       if (wc) {
+               queue_flag_set(QUEUE_FLAG_WC, q);
+               q->flush_flags = REQ_FLUSH;
+       } else
+               queue_flag_clear(QUEUE_FLAG_WC, q);
+       if (fua) {
+               if (wc)
+                       q->flush_flags |= REQ_FUA;
+               queue_flag_set(QUEUE_FLAG_FUA, q);
+       } else
+               queue_flag_clear(QUEUE_FLAG_FUA, q);
+       spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
        blk_max_low_pfn = max_low_pfn - 1;
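
A driver-side sketch of the new helper, called once during queue setup when the device's cache capabilities are known (struct my_dev and its fields are illustrative):

#include <linux/blkdev.h>

struct my_dev {
	bool has_write_cache;
	bool supports_fua;
};

static void my_configure_cache(struct my_dev *dev, struct request_queue *q)
{
	/*
	 * Advertise a volatile write back cache, so the block layer issues
	 * flushes, and FUA if the hardware honours forced unit access.
	 */
	blk_queue_write_cache(q, dev->has_write_cache, dev->supports_fua);
}

This replaces the old pattern of encoding the same information through blk_queue_flush(), as noted in the merge message.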
index 995b58d46ed109b0c7241b6db5b870ed911dc845..99205965f5596c2c935045c049f5cf9683c7bf42 100644 (file)
@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
        return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+       if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+               return sprintf(page, "write back\n");
+
+       return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+                             size_t count)
+{
+       int set = -1;
+
+       if (!strncmp(page, "write back", 10))
+               set = 1;
+       else if (!strncmp(page, "write through", 13) ||
+                !strncmp(page, "none", 4))
+               set = 0;
+
+       if (set == -1)
+               return -EINVAL;
+
+       spin_lock_irq(q->queue_lock);
+       if (set)
+               queue_flag_set(QUEUE_FLAG_WC, q);
+       else
+               queue_flag_clear(QUEUE_FLAG_WC, q);
+       spin_unlock_irq(q->queue_lock);
+
+       return count;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
        .store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+       .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_wc_show,
+       .store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
+       &queue_wc_entry.attr,
        NULL,
 };
 
index 2149a1ddbacf21a02a164b042c76e06bf6734c77..47a3e540631a38b123a078350d28ae457753ebbd 100644 (file)
@@ -211,15 +211,14 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
  *
  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
  * throtl_grp; otherwise, just "throtl".
- *
- * TODO: this should be made a function and name formatting should happen
- * after testing whether blktrace is enabled.
  */
 #define throtl_log(sq, fmt, args...)   do {                            \
        struct throtl_grp *__tg = sq_to_tg((sq));                       \
        struct throtl_data *__td = sq_to_td((sq));                      \
                                                                        \
        (void)__td;                                                     \
+       if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
+               break;                                                  \
        if ((__tg)) {                                                   \
                char __pbuf[128];                                       \
                                                                        \
index 586f9168ffa4828def11d1cfed6526f8ec8ab05f..9a9ec212fab8e6f329f351ef59adb38aca75ba64 100644 (file)
@@ -562,7 +562,7 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
        put_unaligned_be32(count, &buf[16]);
 
        req = skreq->req;
-       blk_add_request_payload(req, page, len);
+       blk_add_request_payload(req, page, 0, len);
 }
 
 static void skd_request_fn_not_online(struct request_queue *q);
index f52b74cf8d1e691a10676f00c2012f809cc940f8..69b0a4a7a15f472e31884b02c46228a5b85e6314 100644 (file)
@@ -779,7 +779,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
         * discarded on disk. This allows us to report completion on the full
         * amount of blocks described by the request.
         */
-       blk_add_request_payload(rq, page, len);
+       blk_add_request_payload(rq, page, 0, len);
        ret = scsi_init_io(cmd);
        rq->__data_len = nr_bytes;
 
index 6b7481f62218895945ba6dcd1b7df2e49268d552..9faebf7f9a33c04a73506e7e379432ce1cce93bf 100644 (file)
@@ -702,6 +702,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
        return bio;
 }
 
+/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+       bio_set_flag(bio, BIO_CHAIN);
+       smp_mb__before_atomic();
+       atomic_inc(&bio->__bi_remaining);
+}
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
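
With bio_inc_remaining() visible outside block/bio.c again, a stacking driver such as dm can take an extra completion reference on a bio it is still working on; a minimal sketch of that pattern (the surrounding functions are illustrative):

#include <linux/bio.h>

static void my_start_background_op(struct bio *bio)
{
	/*
	 * One more bio_endio() call is now required before @bio actually
	 * completes, so an endio from the normal submission path cannot
	 * finish it while the background work is still outstanding.
	 */
	bio_inc_remaining(bio);
}

static void my_background_op_done(struct bio *bio, int error)
{
	if (error && !bio->bi_error)
		bio->bi_error = error;
	bio_endio(bio);		/* drops the reference taken above */
}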
index 9ac9799b702b94617dcbc5842af002030440ccf3..c808fec1ce446914aa8b193284ad3cf41a5ca886 100644 (file)
@@ -240,6 +240,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+               busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
index 86a38ea1823f3307caec422941fcb15dcf7c4e25..77e5d81f07aaf36533e3e016267229ae64a21a74 100644 (file)
@@ -208,7 +208,7 @@ enum rq_flag_bits {
 #define REQ_COMMON_MASK \
        (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
         REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
-        REQ_SECURE | REQ_INTEGRITY)
+        REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
 #define BIO_NO_ADVANCE_ITER_MASK       (REQ_DISCARD|REQ_WRITE_SAME)
index 669e419d62347e2965bdaffbf8e960e1e34af4f9..b79131acf6c0cf76cb096a4d2721779c061d0277 100644 (file)
@@ -491,6 +491,8 @@ struct request_queue {
 #define QUEUE_FLAG_INIT_DONE   20      /* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21      /* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL               22       /* IO polling enabled if set */
+#define QUEUE_FLAG_WC         23       /* Write back caching */
+#define QUEUE_FLAG_FUA        24       /* device supports FUA writes */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -779,7 +781,7 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
 extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
-               unsigned int len);
+               int offset, unsigned int len);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                             struct bio_set *bs, gfp_t gfp_mask,
@@ -1009,6 +1011,7 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1128,6 +1131,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
index afc1343df3c7ab3775c8fc2e822c4e619bf658c1..0f3172b8b22597309cacf6aa8567786996ce7aa5 100644 (file)
@@ -57,6 +57,14 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...);
        } while (0)
 #define BLK_TN_MAX_MSG         128
 
+static inline bool blk_trace_note_message_enabled(struct request_queue *q)
+{
+       struct blk_trace *bt = q->blk_trace;
+       if (likely(!bt))
+               return false;
+       return bt->act_mask & BLK_TC_NOTIFY;
+}
+
 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
                                void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
@@ -79,6 +87,7 @@ extern struct attribute_group blk_trace_attr_group;
 # define blk_trace_remove(q)                           (-ENOTTY)
 # define blk_add_trace_msg(q, fmt, ...)                        do { } while (0)
 # define blk_trace_remove_sysfs(dev)                   do { } while (0)
+# define blk_trace_note_message_enabled(q)             (false)
 static inline int blk_trace_init_sysfs(struct device *dev)
 {
        return 0;
index f94e7a21f52d962439e2df0d66d96bfb11f5cd1b..9aef8654e90d12f954368e52d5d11b381ec9724f 100644 (file)
@@ -1349,6 +1349,7 @@ static enum print_line_t print_one_line(struct trace_iterator *iter,
        if (t->action == BLK_TN_MESSAGE) {
                log_action(iter, long_act ? "message" : "m");
                blk_log_msg(s, iter->ent);
+               return trace_handle_return(s);
        }
 
        if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
@@ -1551,6 +1552,7 @@ static const struct {
        { BLK_TC_COMPLETE,      "complete"      },
        { BLK_TC_FS,            "fs"            },
        { BLK_TC_PC,            "pc"            },
+       { BLK_TC_NOTIFY,        "notify"        },
        { BLK_TC_AHEAD,         "ahead"         },
        { BLK_TC_META,          "meta"          },
        { BLK_TC_DISCARD,       "discard"       },