blk-stat: use READ and WRITE instead of BLK_STAT_{READ,WRITE}
author    Omar Sandoval <osandov@fb.com>
          Tue, 21 Mar 2017 15:56:06 +0000 (08:56 -0700)
committer Jens Axboe <axboe@fb.com>
          Tue, 21 Mar 2017 16:03:08 +0000 (10:03 -0600)
The stats buckets will become generic soon, so make the existing users
use the common READ and WRITE definitions instead of ones internal to
blk-stat.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
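
For context, a minimal sketch (not part of the patch) of the indexing convention this switches to: READ and WRITE are the kernel-wide data-direction indices from include/linux/kernel.h (READ == 0, WRITE == 1), so a two-entry blk_rq_stat array can be indexed by request direction directly. The helper names below are hypothetical, for illustration only.

    #include <linux/kernel.h>   /* READ, WRITE */
    #include <linux/blkdev.h>   /* struct request, rq_data_dir() */
    #include "blk-stat.h"       /* struct blk_rq_stat, blk_stat_init(), blk_stat_add() */

    /* Illustrative only: reset a per-direction stats pair using the
     * generic READ/WRITE indices rather than BLK_STAT_{READ,WRITE}.
     */
    static void example_stat_reset(struct blk_rq_stat stat[2])
    {
            blk_stat_init(&stat[READ]);
            blk_stat_init(&stat[WRITE]);
    }

    /* Illustrative only: account a completed request into the bucket
     * for its direction; rq_data_dir() returns READ (0) or WRITE (1).
     */
    static void example_stat_account(struct blk_rq_stat stat[2], struct request *rq)
    {
            blk_stat_add(&stat[rq_data_dir(rq)], rq);
    }
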
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-stat.c
block/blk-stat.h
block/blk-sysfs.c
block/blk-wbt.c

block/blk-mq-debugfs.c
index f6d917977b3318689f1a1ef387bae47abb03ab0b..48c88723944a90ab5a931d80b93ff83bdffc086e 100644
@@ -333,17 +333,17 @@ static int hctx_stats_show(struct seq_file *m, void *v)
        struct blk_mq_hw_ctx *hctx = m->private;
        struct blk_rq_stat stat[2];
 
-       blk_stat_init(&stat[BLK_STAT_READ]);
-       blk_stat_init(&stat[BLK_STAT_WRITE]);
+       blk_stat_init(&stat[READ]);
+       blk_stat_init(&stat[WRITE]);
 
        blk_hctx_stat_get(hctx, stat);
 
        seq_puts(m, "read: ");
-       print_stat(m, &stat[BLK_STAT_READ]);
+       print_stat(m, &stat[READ]);
        seq_puts(m, "\n");
 
        seq_puts(m, "write: ");
-       print_stat(m, &stat[BLK_STAT_WRITE]);
+       print_stat(m, &stat[WRITE]);
        seq_puts(m, "\n");
        return 0;
 }
@@ -362,8 +362,8 @@ static ssize_t hctx_stats_write(struct file *file, const char __user *buf,
        int i;
 
        hctx_for_each_ctx(hctx, ctx, i) {
-               blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-               blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+               blk_stat_init(&ctx->stat[READ]);
+               blk_stat_init(&ctx->stat[WRITE]);
        }
        return count;
 }
block/blk-mq.c
index 534f49a90e3a4fd2c8d88eaa422cc30df3c1c10c..559e5363bb2c15485b3663d576690be46835d2d2 100644
@@ -2040,8 +2040,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
                spin_lock_init(&__ctx->lock);
                INIT_LIST_HEAD(&__ctx->rq_list);
                __ctx->queue = q;
-               blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
-               blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
+               blk_stat_init(&__ctx->stat[READ]);
+               blk_stat_init(&__ctx->stat[WRITE]);
 
                /* If the cpu isn't online, the cpu is mapped to first hctx */
                if (!cpu_online(i))
@@ -2769,10 +2769,10 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
         * important on devices where the completion latencies are longer
         * than ~10 usec.
         */
-       if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
-               ret = (stat[BLK_STAT_READ].mean + 1) / 2;
-       else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
-               ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
+       if (req_op(rq) == REQ_OP_READ && stat[READ].nr_samples)
+               ret = (stat[READ].mean + 1) / 2;
+       else if (req_op(rq) == REQ_OP_WRITE && stat[WRITE].nr_samples)
+               ret = (stat[WRITE].mean + 1) / 2;
 
        return ret;
 }
block/blk-stat.c
index 186fcb981e9b1d9696e3e000b0fde7b86e1a9663..f80582be5344fc5a2dc69f9748cbd269ccdc9d7c 100644
@@ -55,8 +55,8 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
        uint64_t latest = 0;
        int i, j, nr;
 
-       blk_stat_init(&dst[BLK_STAT_READ]);
-       blk_stat_init(&dst[BLK_STAT_WRITE]);
+       blk_stat_init(&dst[READ]);
+       blk_stat_init(&dst[WRITE]);
 
        nr = 0;
        do {
@@ -64,16 +64,16 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 
                queue_for_each_hw_ctx(q, hctx, i) {
                        hctx_for_each_ctx(hctx, ctx, j) {
-                               blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-                               blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+                               blk_stat_flush_batch(&ctx->stat[READ]);
+                               blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-                               if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-                                   !ctx->stat[BLK_STAT_WRITE].nr_samples)
+                               if (!ctx->stat[READ].nr_samples &&
+                                   !ctx->stat[WRITE].nr_samples)
                                        continue;
-                               if (ctx->stat[BLK_STAT_READ].time > newest)
-                                       newest = ctx->stat[BLK_STAT_READ].time;
-                               if (ctx->stat[BLK_STAT_WRITE].time > newest)
-                                       newest = ctx->stat[BLK_STAT_WRITE].time;
+                               if (ctx->stat[READ].time > newest)
+                                       newest = ctx->stat[READ].time;
+                               if (ctx->stat[WRITE].time > newest)
+                                       newest = ctx->stat[WRITE].time;
                        }
                }
 
@@ -88,14 +88,14 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 
                queue_for_each_hw_ctx(q, hctx, i) {
                        hctx_for_each_ctx(hctx, ctx, j) {
-                               if (ctx->stat[BLK_STAT_READ].time == newest) {
-                                       blk_stat_sum(&dst[BLK_STAT_READ],
-                                                    &ctx->stat[BLK_STAT_READ]);
+                               if (ctx->stat[READ].time == newest) {
+                                       blk_stat_sum(&dst[READ],
+                                                    &ctx->stat[READ]);
                                        nr++;
                                }
-                               if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-                                       blk_stat_sum(&dst[BLK_STAT_WRITE],
-                                                    &ctx->stat[BLK_STAT_WRITE]);
+                               if (ctx->stat[WRITE].time == newest) {
+                                       blk_stat_sum(&dst[WRITE],
+                                                    &ctx->stat[WRITE]);
                                        nr++;
                                }
                        }
@@ -106,7 +106,7 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
                 */
        } while (!nr);
 
-       dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
+       dst[READ].time = dst[WRITE].time = latest;
 }
 
 void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
@@ -114,12 +114,12 @@ void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
        if (q->mq_ops)
                blk_mq_stat_get(q, dst);
        else {
-               blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
-               blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
-               memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
-                               sizeof(struct blk_rq_stat));
-               memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
-                               sizeof(struct blk_rq_stat));
+               blk_stat_flush_batch(&q->rq_stats[READ]);
+               blk_stat_flush_batch(&q->rq_stats[WRITE]);
+               memcpy(&dst[READ], &q->rq_stats[READ],
+                      sizeof(struct blk_rq_stat));
+               memcpy(&dst[WRITE], &q->rq_stats[WRITE],
+                      sizeof(struct blk_rq_stat));
        }
 }
 
@@ -133,31 +133,29 @@ void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
                uint64_t newest = 0;
 
                hctx_for_each_ctx(hctx, ctx, i) {
-                       blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-                       blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+                       blk_stat_flush_batch(&ctx->stat[READ]);
+                       blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-                       if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-                           !ctx->stat[BLK_STAT_WRITE].nr_samples)
+                       if (!ctx->stat[READ].nr_samples &&
+                           !ctx->stat[WRITE].nr_samples)
                                continue;
 
-                       if (ctx->stat[BLK_STAT_READ].time > newest)
-                               newest = ctx->stat[BLK_STAT_READ].time;
-                       if (ctx->stat[BLK_STAT_WRITE].time > newest)
-                               newest = ctx->stat[BLK_STAT_WRITE].time;
+                       if (ctx->stat[READ].time > newest)
+                               newest = ctx->stat[READ].time;
+                       if (ctx->stat[WRITE].time > newest)
+                               newest = ctx->stat[WRITE].time;
                }
 
                if (!newest)
                        break;
 
                hctx_for_each_ctx(hctx, ctx, i) {
-                       if (ctx->stat[BLK_STAT_READ].time == newest) {
-                               blk_stat_sum(&dst[BLK_STAT_READ],
-                                               &ctx->stat[BLK_STAT_READ]);
+                       if (ctx->stat[READ].time == newest) {
+                               blk_stat_sum(&dst[READ], &ctx->stat[READ]);
                                nr++;
                        }
-                       if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-                               blk_stat_sum(&dst[BLK_STAT_WRITE],
-                                               &ctx->stat[BLK_STAT_WRITE]);
+                       if (ctx->stat[WRITE].time == newest) {
+                               blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
                                nr++;
                        }
                }
@@ -226,13 +224,13 @@ void blk_stat_clear(struct request_queue *q)
 
                queue_for_each_hw_ctx(q, hctx, i) {
                        hctx_for_each_ctx(hctx, ctx, j) {
-                               blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-                               blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+                               blk_stat_init(&ctx->stat[READ]);
+                               blk_stat_init(&ctx->stat[WRITE]);
                        }
                }
        } else {
-               blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
-               blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
+               blk_stat_init(&q->rq_stats[READ]);
+               blk_stat_init(&q->rq_stats[WRITE]);
        }
 }
 
block/blk-stat.h
index a2050a0a5314bba690cfe6b1f6526e285c892f0d..34384328b46b2e3d90675c8c385667913f17e070 100644
 #define BLK_STAT_TIME_MASK     ((1ULL << BLK_STAT_SHIFT) - 1)
 #define BLK_STAT_MASK          ~BLK_STAT_TIME_MASK
 
-enum {
-       BLK_STAT_READ   = 0,
-       BLK_STAT_WRITE,
-};
-
 void blk_stat_add(struct blk_rq_stat *, struct request *);
 void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
 void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
block/blk-sysfs.c
index c44b321335f3ebbcc662f0f70b7605f5019c60b7..fdb45fd0db0bdde2c19cb0d963c4daf463d9cb35 100644
@@ -518,8 +518,8 @@ static ssize_t queue_stats_show(struct request_queue *q, char *page)
 
        blk_queue_stat_get(q, stat);
 
-       ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
-       ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+       ret = print_stat(page, &stat[READ], "read :");
+       ret += print_stat(page + ret, &stat[WRITE], "write:");
        return ret;
 }
 
block/blk-wbt.c
index 1aedb1f7ee0c7fde717d7701d3ee74cc90c21d17..aafe5b5512245458a251fe984d82355e83f29693 100644
@@ -255,8 +255,8 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
         * that it's writes impacting us, and not just some sole read on
         * a device that is in a lower power state.
         */
-       return stat[BLK_STAT_READ].nr_samples >= 1 &&
-               stat[BLK_STAT_WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+       return (stat[READ].nr_samples >= 1 &&
+               stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
 }
 
 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
@@ -293,7 +293,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
-           (thislat > rwb->min_lat_nsec && !stat[BLK_STAT_READ].nr_samples)) {
+           (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }
@@ -308,7 +308,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
                 * waited or still has writes in flights, consider us doing
                 * just writes as well.
                 */
-               if ((stat[BLK_STAT_WRITE].nr_samples && blk_stat_is_current(stat)) ||
+               if ((stat[WRITE].nr_samples && blk_stat_is_current(stat)) ||
                    wb_recent_wait(rwb) || wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
@@ -317,8 +317,8 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
        /*
         * If the 'min' latency exceeds our target, step down.
         */
-       if (stat[BLK_STAT_READ].min > rwb->min_lat_nsec) {
-               trace_wbt_lat(bdi, stat[BLK_STAT_READ].min);
+       if (stat[READ].min > rwb->min_lat_nsec) {
+               trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }