// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100msec / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
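 *
 * For example, at scaling step 1 the window shrinks to roughly 70msec
 * (100 / sqrt(2)), and at step 3 to 50msec (100 / sqrt(4)), so the
 * harder we throttle, the faster we also re-evaluate that decision.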
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-rq-qos.h"
#include "elevator.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */
	WBT_DISCARD	= 8,	/* discard */

	WBT_NR_BITS	= 4,	/* number of bits */
};

enum {
	WBT_RWQ_BG	= 0,
	WBT_RWQ_KSWAPD,
	WBT_RWQ_DISCARD,
	WBT_NUM_RWQ,
};

/*
 * If the current state is WBT_STATE_ON/OFF_DEFAULT, it can be switched to
 * any other state; if the current state is WBT_STATE_ON/OFF_MANUAL, it can
 * only be switched to WBT_STATE_OFF/ON_MANUAL.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,	/* on by default */
	WBT_STATE_ON_MANUAL	= 2,	/* on manually by sysfs */
	WBT_STATE_OFF_DEFAULT	= 3,	/* off by default */
	WBT_STATE_OFF_MANUAL	= 4,	/* off manually by sysfs */
};

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	u64 sync_issue;
	void *sync_cookie;

	unsigned int wc;

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct rq_qos rqos;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
	struct rq_depth rq_depth;
};
static inline struct rq_wb *RQWB(struct rq_qos *rqos)
{
	return container_of(rqos, struct rq_wb, rqos);
}

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};
static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
		      rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}
static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}
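
/*
 * A worked example of the batched wakeup above, assuming the default
 * depth of 16 and limit == wb_normal == 8: a sleeper is only woken once
 * inflight drops to zero, or once at least wb_background / 2 == 2 slots
 * have opened up below the limit, rather than waking one task per
 * completion only to have it block again.
 */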

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}
/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, and when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}
static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}
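
/*
 * For the default max_depth of 16, calc_wb_limits() works out to
 * wb_normal == (16 + 1) / 2 == 8 and wb_background == (16 + 3) / 4 == 4:
 * background writeback gets a quarter of the scaled depth, normal
 * writeback half, and only high priority IO may use the full depth.
 */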

static void scale_up(struct rq_wb *rwb)
{
	if (!rq_depth_scale_up(&rwb->rq_depth))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, tracepoint_string("scale down"));
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}
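
/*
 * A quick sanity check of the fixed-point math in rwb_arm_timer(): since
 * int_sqrt(x << 8) == 16 * int_sqrt(x), the << 4 and << 8 shifts cancel
 * and the division implements win_nsec / sqrt(scale_step + 1) in integer
 * math. With win_nsec = 100msec and scale_step = 3:
 *
 *	int_sqrt((3 + 1) << 8) = int_sqrt(1024) = 32
 *	cur_win_nsec = (100msec << 4) / 32 = 50msec
 *
 * which matches the intended 100msec / sqrt(4) from the file header.
 */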

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	if (!rwb->rqos.disk)
		return;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, and while we don't have
		 * a valid read/write sample, we do have writes going on.
		 * Allow the step to go negative, to increase write perf.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to the center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

bool wbt_disabled(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	return !rqos || RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT ||
	       RQWB(rqos)->enable_state == WBT_STATE_OFF_MANUAL;
}
u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return;

	RQWB(rqos)->min_lat_nsec = val;
	if (val)
		RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	else
		RQWB(rqos)->enable_state = WBT_STATE_OFF_MANUAL;

	wbt_update_limits(RQWB(rqos));
}

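/*
 * "Close" IO means an unrelated, non-throttled issue or completion was
 * seen within the last HZ / 10 jiffies, i.e. roughly the last 100msec.
 */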
static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
	       time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to the background depth.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}
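
/*
 * Summarizing get_limit() with the default limits (max_depth == 16,
 * wb_normal == 8, wb_background == 4): discards may use 4 slots;
 * REQ_HIPRIO writes, kswapd writeback, and writes issued shortly after
 * a task was throttled in balance_dirty_pages() may use all 16;
 * background writeback or IO close in time to unrelated IO may use 4;
 * everything else may use 8.
 */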

struct wbt_wait_data {
	struct rq_wb *rwb;
	enum wbt_flags wb_acct;
	blk_opf_t opf;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;

	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;

	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       blk_opf_t opf)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.rwb = rwb,
		.wb_acct = wb_acct,
		.opf = opf,
	};

	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}
static inline bool wbt_should_throttle(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		fallthrough;
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}
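
/*
 * So, for example, a writeback write issued by kswapd maps to
 * (WBT_TRACKED | WBT_KSWAPD), a discard to (WBT_TRACKED | WBT_DISCARD),
 * and a read to just WBT_READ, which is never throttled and is only
 * used for the issue/completion timestamps.
 */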

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);

	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);

	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track the issue time of a sync IO, in case it takes a long time
	 * to complete, so that we can react more quickly. Note that this is
	 * just a hint. The request can go away when it completes, so it's
	 * important we never dereference it. We only use the address to
	 * compare with, which is why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	bool disable_flag = q->elevator &&
		    test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);

	/* Throttling already enabled? */
	rqos = wbt_rq_qos(q);
	if (rqos) {
		if (!disable_flag &&
		    RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
			RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
		return;
	}

	/* Queue not registered? Maybe shutting down... */
	if (!blk_queue_registered(q))
		return;

	if (queue_is_mq(q) && !disable_flag)
		wbt_init(disk);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const enum req_op op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
	wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);

	blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct gendisk *disk)
{
	struct rq_qos *rqos = wbt_rq_qos(disk->queue);
	struct rq_wb *rwb;

	if (!rqos)
		return;

	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
		blk_stat_deactivate(rwb->cb);
		rwb->enable_state = WBT_STATE_OFF_DEFAULT;
	}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
	return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%d\n", rwb->enable_state);
	return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;

	seq_printf(m, "%u\n", rqos->id);
	return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		seq_printf(m, "%d: inflight %d\n", i,
			   atomic_read(&rwb->rq_wait[i].inflight));
	return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%lu\n", rwb->min_lat_nsec);
	return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->unknown_cnt);
	return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_normal);
	return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_background);
	return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
	{"enabled", 0400, wbt_enabled_show},
	{"id", 0400, wbt_id_show},
	{"inflight", 0400, wbt_inflight_show},
	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
	{"wb_normal", 0400, wbt_normal_show},
	{"wb_background", 0400, wbt_background_show},
	{},
};
#endif

static const struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.queue_depth_changed = wbt_queue_depth_changed,
	.exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
	.debugfs_attrs = wbt_debugfs_attrs,
#endif
};

int wbt_init(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct rq_wb *rwb;
	int i;
	int ret;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	rwb->rq_depth.queue_depth = blk_queue_depth(q);
	wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	mutex_lock(&q->rq_qos_mutex);
	ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
	mutex_unlock(&q->rq_qos_mutex);
	if (ret)
		goto err_free;

	blk_stat_add_callback(q, rwb->cb);

	return 0;

err_free:
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
	return ret;
}