CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
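+/* debugfs write handler: any write clears the per-ctx hit/refill counters */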
+static ssize_t ctx_tag_hit_write(void *data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct blk_mq_ctx *ctx = data;
+
+ ctx->tag_hit = ctx->tag_refill = 0;
+ return count;
+}
+
+static int ctx_tag_hit_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_ctx *ctx = data;
+
+ seq_printf(m, "hit=%lu refills=%lu\n", ctx->tag_hit, ctx->tag_refill);
+ return 0;
+}
+
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
struct blk_mq_ctx *ctx = data;
{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
+ {"tag_hit", 0600, ctx_tag_hit_show, ctx_tag_hit_write},
{"merged", 0600, ctx_merged_show, ctx_merged_write},
{"completed", 0600, ctx_completed_show, ctx_completed_write},
{},
*/
#include <linux/kernel.h>
#include <linux/module.h>
-
#include <linux/blk-mq.h>
#include <linux/delay.h>
+#include <linux/cpu.h>
+
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
return __sbitmap_queue_get(bt);
}
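+/*
+ * Return any tags cached in @ctx back to the shared bitmap. Called on the
+ * owning CPU (via IPI) or for a ctx whose CPU has gone offline, so it does
+ * not race with local batch allocation.
+ */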
+void blk_mq_tag_ctx_flush_batch(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
+{
+ struct sbitmap_queue *bt = &hctx->tags->bitmap_tags;
+ unsigned int i;
+
+ for (i = 0; i < hctx->queue->tag_set->nr_maps; i++) {
+ struct blk_mq_ctx_type *type = &ctx->type[i];
+
+ if (!type->tags)
+ continue;
+
+ __sbitmap_queue_clear_batch(bt, type->tag_offset, type->tags);
+ type->tags = 0;
+ }
+}
+
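+/* IPI callback: flush the calling CPU's ctx tag cache for this hctx */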
+static void ctx_flush_ipi(void *data)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+ struct blk_mq_ctx *ctx;
+
+ ctx = __blk_mq_get_ctx(hctx->queue, smp_processor_id());
+ blk_mq_tag_ctx_flush_batch(hctx, ctx);
+ atomic_dec(&hctx->flush_pending);
+}
+
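+/*
+ * Ask every CPU mapped to @hctx to return its cached tags. flush_pending
+ * ensures only one flush is in flight per hctx at a time.
+ */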
+static void blk_mq_tag_flush_batches(struct blk_mq_hw_ctx *hctx)
+{
+ int cpu, err;
+
+ if (atomic_cmpxchg(&hctx->flush_pending, 0, hctx->nr_ctx))
+ return;
+ cpus_read_lock();
+ for_each_cpu(cpu, hctx->cpumask) {
+ err = smp_call_function_single(cpu, ctx_flush_ipi, hctx, 0);
+ if (err)
+ atomic_dec(&hctx->flush_pending);
+ }
+ cpus_read_unlock();
+}
+
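+/* kick a per-ctx tag cache flush for every hardware queue in @q */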
+void blk_mq_tag_queue_flush_batches(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_tag_flush_batches(hctx);
+}
+
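+/*
+ * Try to allocate a tag from this ctx's cached batch, refilling the batch
+ * from the shared bitmap if it is empty. Returns a tag, or -1 if the caller
+ * should fall back to the regular sbitmap allocation path.
+ */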
+static int blk_mq_get_tag_batch(struct blk_mq_alloc_data *data)
+{
+ struct blk_mq_hw_ctx *hctx = data->hctx;
+ struct blk_mq_ctx_type *type;
+ struct blk_mq_ctx *ctx = data->ctx;
+ struct blk_mq_tags *tags;
+ struct sbitmap_queue *bt;
+ int tag = -1;
+
+ if (!ctx || (data->flags & BLK_MQ_REQ_INTERNAL))
+ return -1;
+
+ tags = hctx->tags;
+ bt = &tags->bitmap_tags;
+ /* don't do batches for round-robin or (very) sparse maps */
+ if (bt->round_robin || bt->sb.shift < ilog2(BITS_PER_LONG) - 1)
+ return -1;
+
+ /*
+ * We could get by with preempt disable, but we also need to keep the
+ * flush IPI from running while we touch the cache.
+ */
+ local_irq_disable();
+ if (unlikely(ctx->cpu != smp_processor_id()))
+ goto out;
+
+ type = &ctx->type[hctx->type];
+
+ if (type->tags) {
+get_tag:
+ ctx->tag_hit++;
+
+ tag = __ffs(type->tags);
+ type->tags &= ~(1UL << tag);
+ tag += type->tag_offset;
+out:
+ local_irq_enable();
+ return tag;
+ }
+
+ /* no current tag cache, attempt to refill a batch */
+ if (!__sbitmap_queue_get_batch(bt, &type->tag_offset, &type->tags)) {
+ ctx->tag_refill++;
+ goto get_tag;
+ }
+
+ goto out;
+}
+
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
bt = &tags->breserved_tags;
tag_offset = 0;
} else {
- bt = &tags->bitmap_tags;
tag_offset = tags->nr_reserved_tags;
+
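+ /* try the per-ctx batch cache before hitting the shared bitmap */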
+ tag = blk_mq_get_tag_batch(data);
+ if (tag != -1)
+ goto found_tag;
+
+ bt = &tags->bitmap_tags;
}
tag = __blk_mq_get_tag(data, bt);
if (tag != -1)
break;
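+ /*
+ * Out of tags: reclaim tags parked in per-ctx batch caches before
+ * preparing to wait.
+ */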
+ if (!(data->flags & BLK_MQ_REQ_RESERVED))
+ blk_mq_tag_flush_batches(data->hctx);
+
sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
tag = __blk_mq_get_tag(data, bt);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv);
+void blk_mq_tag_queue_flush_batches(struct request_queue *q);
+void blk_mq_tag_ctx_flush_batch(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx);
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
struct blk_mq_hw_ctx *hctx)
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
type = hctx->type;
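+ /* the CPU is going away, return any tags its ctx still has cached */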
+ blk_mq_tag_ctx_flush_batch(hctx, ctx);
+
spin_lock(&ctx->lock);
if (!list_empty(&ctx->type[type].rq_list)) {
list_splice_init(&ctx->type[type].rq_list, &tmp);
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
- for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
+ for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) {
INIT_LIST_HEAD(&__ctx->type[k].rq_list);
+ __ctx->type[k].tags = 0;
+ }
/*
* Set local node, IFF we have more than one hw queue. If
}
hctx = blk_mq_map_queue_type(q, j, i);
+ ctx->type[j].tags = 0;
ctx->hctxs[j] = hctx;
/*
* If the CPU is already set in the mask, then we've
BUG_ON(!hctx->nr_ctx);
}
- for (; j < HCTX_MAX_TYPES; j++)
+ for (; j < HCTX_MAX_TYPES; j++) {
ctx->hctxs[j] = blk_mq_map_queue_type(q,
HCTX_TYPE_DEFAULT, i);
+ ctx->type[j].tags = 0;
+ }
}
queue_for_each_hw_ctx(q, hctx, i) {
if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
return;
- list_for_each_entry(q, &set->tag_list, tag_set_list)
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
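+ /* drop cached tags before the queues are frozen and remapped */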
+ blk_mq_tag_queue_flush_batches(q);
blk_mq_freeze_queue(q);
+ }
+
/*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
struct blk_mq_ctx_type {
struct list_head rq_list;
+
+ /* tag batch cache */
+ unsigned long tags;
+ unsigned int tag_offset;
};
/**
struct {
spinlock_t lock;
struct blk_mq_ctx_type type[HCTX_MAX_TYPES];
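+ /* tag batch cache hit/refill stats, exported via debugfs */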
+ unsigned long tag_hit, tag_refill;
} ____cacheline_aligned_in_smp;
unsigned int cpu;
*/
atomic_t nr_active;
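+ /** @flush_pending: Count of outstanding per-ctx tag cache flush IPIs. */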
+ atomic_t flush_pending;
+
/** @cpuhp_dead: List to store request if some CPU die. */
struct hlist_node cpuhp_dead;
/** @kobj: Kernel object for sysfs. */