Commit | Line | Data |
---|---|---|
aee69d78 PV |
1 | /* |
2 | * Budget Fair Queueing (BFQ) I/O scheduler. | |
3 | * | |
4 | * Based on ideas and code from CFQ: | |
5 | * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> | |
6 | * | |
7 | * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> | |
8 | * Paolo Valente <paolo.valente@unimore.it> | |
9 | * | |
10 | * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it> | |
11 | * Arianna Avanzini <avanzini@google.com> | |
12 | * | |
13 | * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org> | |
14 | * | |
15 | * This program is free software; you can redistribute it and/or | |
16 | * modify it under the terms of the GNU General Public License as | |
17 | * published by the Free Software Foundation; either version 2 of the | |
18 | * License, or (at your option) any later version. | |
19 | * | |
20 | * This program is distributed in the hope that it will be useful, | |
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
23 | * General Public License for more details. | |
24 | * | |
25 | * BFQ is a proportional-share I/O scheduler, with some extra | |
26 | * low-latency capabilities. BFQ also supports full hierarchical | |
27 | * scheduling through cgroups. The next paragraphs provide an |
28 | * introduction to BFQ's inner workings. Details on BFQ's benefits, |
29 | * usage and limitations can be found in Documentation/block/bfq-iosched.txt. |
30 | * | |
31 | * BFQ is a proportional-share storage-I/O scheduling algorithm based | |
32 | * on the slice-by-slice service scheme of CFQ. But BFQ assigns | |
33 | * budgets, measured in number of sectors, to processes instead of | |
34 | * time slices. The device is not granted to the in-service process | |
35 | * for a given time slice, but until it has exhausted its assigned | |
36 | * budget. This change from the time to the service domain enables BFQ | |
37 | * to distribute the device throughput among processes as desired, | |
38 | * without any distortion due to throughput fluctuations, or to device | |
39 | * internal queueing. BFQ uses an ad hoc internal scheduler, called | |
40 | * B-WF2Q+, to schedule processes according to their budgets. More | |
41 | * precisely, BFQ schedules queues associated with processes. Each | |
42 | * process/queue is assigned a user-configurable weight, and B-WF2Q+ | |
43 | * guarantees that each queue receives a fraction of the throughput | |
44 | * proportional to its weight. Thanks to the accurate policy of | |
45 | * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound | |
46 | * processes issuing sequential requests (to boost the throughput), | |
47 | * and yet guarantee a low latency to interactive and soft real-time | |
48 | * applications. | |
49 | * | |
50 | * In particular, to provide these low-latency guarantees, BFQ | |
51 | * explicitly privileges the I/O of two classes of time-sensitive | |
52 | * applications: interactive and soft real-time. This feature enables | |
53 | * BFQ to provide applications in these classes with a very low | |
54 | * latency. Finally, BFQ also features additional heuristics for | |
55 | * preserving both a low latency and a high throughput on NCQ-capable, | |
56 | * rotational or flash-based devices, and for getting the job done |
57 | * quickly for applications consisting of many I/O-bound processes. |
58 | * | |
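 * As a concrete example of the proportional-share guarantee: under
 * B-WF2Q+, two continuously backlogged queues with weights 100 and
 * 200 receive, in the long term, about 1/3 and 2/3 of the device
 * throughput, respectively, whatever their request patterns.
 *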
43c1b3d6 PV |
59 | * NOTE: if the main or only goal, with a given device, is to achieve |
60 | * the maximum-possible throughput at all times, then do switch off | |
61 | * all low-latency heuristics for that device, by setting low_latency | |
62 | * to 0. | |
63 | * | |
aee69d78 PV |
64 | * BFQ is described in [1], which also contains a reference to the |
65 | * initial, more theoretical paper on BFQ. The latter paper provides |
66 | * full details on the main algorithm, as well as |
67 | * formulas of the guarantees and formal proofs of all the properties. | |
68 | * With respect to the version of BFQ presented in these papers, this | |
69 | * implementation adds a few more heuristics, such as the one that | |
70 | * guarantees a low latency to soft real-time applications, and a | |
71 | * hierarchical extension based on H-WF2Q+. | |
72 | * | |
73 | * B-WF2Q+ is based on WF2Q+, which is described in [2], together with | |
74 | * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+ | |
75 | * with O(log N) complexity derives from the one introduced with EEVDF | |
76 | * in [3]. | |
77 | * | |
78 | * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O | |
79 | * Scheduler", Proceedings of the First Workshop on Mobile System | |
80 | * Technologies (MST-2015), May 2015. | |
81 | * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf | |
82 | * | |
83 | * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing | |
84 | * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689, | |
85 | * Oct 1997. | |
86 | * | |
87 | * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz | |
88 | * | |
89 | * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline | |
90 | * First: A Flexible and Accurate Mechanism for Proportional Share | |
91 | * Resource Allocation", technical report. | |
92 | * | |
93 | * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf | |
94 | */ | |
95 | #include <linux/module.h> | |
96 | #include <linux/slab.h> | |
97 | #include <linux/blkdev.h> | |
e21b7a0b | 98 | #include <linux/cgroup.h> |
aee69d78 PV |
99 | #include <linux/elevator.h> |
100 | #include <linux/ktime.h> | |
101 | #include <linux/rbtree.h> | |
102 | #include <linux/ioprio.h> | |
103 | #include <linux/sbitmap.h> | |
104 | #include <linux/delay.h> | |
105 | ||
106 | #include "blk.h" | |
107 | #include "blk-mq.h" | |
108 | #include "blk-mq-tag.h" | |
109 | #include "blk-mq-sched.h" | |
ea25da48 | 110 | #include "bfq-iosched.h" |
b5dc5d4d | 111 | #include "blk-wbt.h" |
aee69d78 | 112 | |
ea25da48 PV |
113 | #define BFQ_BFQQ_FNS(name) \ |
114 | void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ | |
115 | { \ | |
116 | __set_bit(BFQQF_##name, &(bfqq)->flags); \ | |
117 | } \ | |
118 | void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ | |
119 | { \ | |
120 | __clear_bit(BFQQF_##name, &(bfqq)->flags); \ | |
121 | } \ | |
122 | int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ | |
123 | { \ | |
124 | return test_bit(BFQQF_##name, &(bfqq)->flags); \ | |
44e44a1b PV |
125 | } |
126 | ||
ea25da48 PV |
127 | BFQ_BFQQ_FNS(just_created); |
128 | BFQ_BFQQ_FNS(busy); | |
129 | BFQ_BFQQ_FNS(wait_request); | |
130 | BFQ_BFQQ_FNS(non_blocking_wait_rq); | |
131 | BFQ_BFQQ_FNS(fifo_expire); | |
d5be3fef | 132 | BFQ_BFQQ_FNS(has_short_ttime); |
ea25da48 PV |
133 | BFQ_BFQQ_FNS(sync); |
134 | BFQ_BFQQ_FNS(IO_bound); | |
135 | BFQ_BFQQ_FNS(in_large_burst); | |
136 | BFQ_BFQQ_FNS(coop); | |
137 | BFQ_BFQQ_FNS(split_coop); | |
138 | BFQ_BFQQ_FNS(softrt_update); | |
139 | #undef BFQ_BFQQ_FNS |
aee69d78 | 140 | |
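/*
 * Expansion sketch (illustrative, not part of the original source):
 * for instance, BFQ_BFQQ_FNS(busy) above generates the following
 * three helpers:
 *
 *	void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__set_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 *	void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__clear_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 *	int bfq_bfqq_busy(const struct bfq_queue *bfqq)
 *	{
 *		return test_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 */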
ea25da48 PV |
141 | /* Expiration time of sync (0) and async (1) requests, in ns. */ |
142 | static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; | |
aee69d78 | 143 | |
ea25da48 PV |
144 | /* Maximum backwards seek (magic number lifted from CFQ), in KiB. */ |
145 | static const int bfq_back_max = 16 * 1024; | |
aee69d78 | 146 | |
ea25da48 PV |
147 | /* Penalty of a backwards seek, in number of sectors. */ |
148 | static const int bfq_back_penalty = 2; | |
e21b7a0b | 149 | |
ea25da48 PV |
150 | /* Idling period duration, in ns. */ |
151 | static u64 bfq_slice_idle = NSEC_PER_SEC / 125; | |
aee69d78 | 152 | |
ea25da48 PV |
153 | /* Minimum number of assigned budgets for which stats are safe to compute. */ |
154 | static const int bfq_stats_min_budgets = 194; | |
aee69d78 | 155 | |
ea25da48 PV |
156 | /* Default maximum budget value, in sectors. */ |
157 | static const int bfq_default_max_budget = 16 * 1024; | |
e21b7a0b | 158 | |
ea25da48 PV |
159 | /* |
160 | * Async to sync throughput distribution is controlled as follows: | |
161 | * when an async request is served, the entity is charged the number | |
162 | * of sectors of the request, multiplied by the factor below | |
163 | */ | |
164 | static const int bfq_async_charge_factor = 10; | |
aee69d78 | 165 | |
ea25da48 PV |
166 | /* Default timeout values, in jiffies, approximating CFQ defaults. */ |
167 | const int bfq_timeout = HZ / 8; | |
aee69d78 | 168 | |
7b8fa3b9 PV |
169 | /* |
170 | * Time limit for merging (see comments in bfq_setup_cooperator). Set | |
171 | * to the slowest value that, in our tests, proved to be effective in | |
172 | * removing false positives, while not causing true positives to miss | |
173 | * queue merging. | |
174 | * | |
175 | * As can be deduced from the low time limit below, queue merging, if | |
176 | * successful, happens at the very beginning of the I/O of the involved |
177 | * cooperating processes, as a consequence of the arrival of the very | |
178 | * first requests from each cooperator. After that, there is very | |
179 | * little chance to find cooperators. | |
180 | */ | |
181 | static const unsigned long bfq_merge_time_limit = HZ/10; | |
182 | ||
ea25da48 | 183 | static struct kmem_cache *bfq_pool; |
e21b7a0b | 184 | |
ea25da48 PV |
185 | /* Below this threshold (in ns), we consider thinktime immediate. */ |
186 | #define BFQ_MIN_TT (2 * NSEC_PER_MSEC) | |
e21b7a0b | 187 | |
ea25da48 PV |
188 | /* hw_tag detection: parallel requests threshold and min samples needed. */ |
189 | #define BFQ_HW_QUEUE_THRESHOLD 4 | |
190 | #define BFQ_HW_QUEUE_SAMPLES 32 | |
aee69d78 | 191 | |
ea25da48 PV |
192 | #define BFQQ_SEEK_THR (sector_t)(8 * 100) |
193 | #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) | |
194 | #define BFQQ_CLOSE_THR (sector_t)(8 * 1024) | |
f0ba5ea2 | 195 | #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19) |
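/*
 * Reading BFQQ_SEEKY (editor's note, assuming seek_history is a 32-bit
 * sliding window with one bit per recently dispatched request, as the
 * use of hweight32() suggests): a queue is deemed seeky when more than
 * 19 of its last 32 requests were classified as seeky, i.e., when
 * roughly 60% of its recent I/O was non-sequential.
 */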
aee69d78 | 196 | |
ea25da48 PV |
197 | /* Min number of samples required to perform peak-rate update */ |
198 | #define BFQ_RATE_MIN_SAMPLES 32 | |
199 | /* Min observation time interval required to perform a peak-rate update (ns) */ | |
200 | #define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) | |
201 | /* Target observation time interval for a peak-rate update (ns) */ | |
202 | #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC | |
aee69d78 | 203 | |
ea25da48 PV |
204 | /* Shift used for peak rate fixed precision calculations. */ |
205 | #define BFQ_RATE_SHIFT 16 | |
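/*
 * For example, with BFQ_RATE_SHIFT == 16, a peak rate of 1 sector/usec
 * is stored as 1 << 16 == 65536, and half that rate as 32768.
 */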
aee69d78 | 206 | |
ea25da48 PV |
207 | /* |
208 | * By default, BFQ computes the duration of the weight raising for | |
209 | * interactive applications automatically, using the following formula: | |
210 | * duration = (R / r) * T, where r is the peak rate of the device, and | |
211 | * R and T are two reference parameters. | |
212 | * In particular, R is the peak rate of the reference device (see below), | |
213 | * and T is a reference time: given the systems that are likely to be | |
214 | * installed on the reference device according to its speed class, T is | |
215 | * about the maximum time needed, under BFQ and while reading two files in | |
216 | * parallel, to load typical large applications on these systems. | |
217 | * In practice, the slower/faster the device at hand is, the more/less it | |
218 | * takes to load applications with respect to the reference device. | |
219 | * Accordingly, the longer/shorter BFQ grants weight raising to interactive | |
220 | * applications. | |
221 | * | |
222 | * BFQ uses four different reference pairs (R, T), depending on: | |
223 | * . whether the device is rotational or non-rotational; | |
224 | * . whether the device is slow, such as old or portable HDDs, as well as | |
225 | * SD cards, or fast, such as newer HDDs and SSDs. | |
226 | * | |
227 | * The device's speed class is dynamically (re)detected in | |
228 | * bfq_update_peak_rate() every time the estimated peak rate is updated. | |
229 | * | |
230 | * In the following definitions, R_slow[0]/R_fast[0] and | |
231 | * T_slow[0]/T_fast[0] are the reference values for a slow/fast | |
232 | * rotational device, whereas R_slow[1]/R_fast[1] and | |
233 | * T_slow[1]/T_fast[1] are the reference values for a slow/fast | |
234 | * non-rotational device. Finally, device_speed_thresh are the | |
235 | * thresholds used to switch between speed classes. The reference | |
236 | * rates are not the actual peak rates of the devices used as a | |
237 | * reference, but slightly lower values. The reason for using these | |
238 | * slightly lower values is that the peak-rate estimator tends to | |
239 | * yield slightly lower values than the actual peak rate (it can yield | |
240 | * the actual peak rate only if there is only one process doing I/O, | |
241 | * and the process does sequential I/O). | |
242 | * | |
243 | * Both the reference peak rates and the thresholds are measured in | |
244 | * sectors/usec, left-shifted by BFQ_RATE_SHIFT. | |
245 | */ | |
246 | static int R_slow[2] = {1000, 10700}; | |
247 | static int R_fast[2] = {14000, 33000}; | |
248 | /* | |
249 | * To improve readability, a conversion function is used to initialize the | |
250 | * following arrays, which entails that they can be initialized only in a | |
251 | * function. | |
252 | */ | |
253 | static int T_slow[2]; | |
254 | static int T_fast[2]; | |
255 | static int device_speed_thresh[2]; | |
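/*
 * A minimal sketch of such a conversion function (editor's
 * illustration: the reference times and the threshold choice below are
 * hypothetical, not the values used by the actual initialization
 * code):
 */
static void __maybe_unused bfq_example_init_ref_params(void)
{
	/* reference load times, converted from milliseconds to jiffies */
	T_slow[0] = msecs_to_jiffies(3500);	/* slow rotational */
	T_slow[1] = msecs_to_jiffies(6000);	/* slow non-rotational */
	T_fast[0] = msecs_to_jiffies(7000);	/* fast rotational */
	T_fast[1] = msecs_to_jiffies(2500);	/* fast non-rotational */

	/*
	 * One plausible threshold choice: halfway between the slow and
	 * fast reference rates of each class.
	 */
	device_speed_thresh[0] = (R_slow[0] + R_fast[0]) / 2;
	device_speed_thresh[1] = (R_slow[1] + R_fast[1]) / 2;
}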
aee69d78 | 256 | |
12cd3a2f | 257 | #define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0]) |
ea25da48 | 258 | #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) |
aee69d78 | 259 | |
ea25da48 | 260 | struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) |
e21b7a0b | 261 | { |
ea25da48 | 262 | return bic->bfqq[is_sync]; |
aee69d78 PV |
263 | } |
264 | ||
ea25da48 | 265 | void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) |
aee69d78 | 266 | { |
ea25da48 | 267 | bic->bfqq[is_sync] = bfqq; |
aee69d78 PV |
268 | } |
269 | ||
ea25da48 | 270 | struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) |
aee69d78 | 271 | { |
ea25da48 | 272 | return bic->icq.q->elevator->elevator_data; |
e21b7a0b | 273 | } |
aee69d78 | 274 | |
ea25da48 PV |
275 | /** |
276 | * icq_to_bic - convert iocontext queue structure to bfq_io_cq. | |
277 | * @icq: the iocontext queue. | |
278 | */ | |
279 | static struct bfq_io_cq *icq_to_bic(struct io_cq *icq) | |
e21b7a0b | 280 | { |
ea25da48 PV |
281 | /* bic->icq is the first member, %NULL will convert to %NULL */ |
282 | return container_of(icq, struct bfq_io_cq, icq); | |
e21b7a0b | 283 | } |
aee69d78 | 284 | |
ea25da48 PV |
285 | /** |
286 | * bfq_bic_lookup - search into @ioc a bic associated to @bfqd. | |
287 | * @bfqd: the lookup key. | |
288 | * @ioc: the io_context of the process doing I/O. | |
289 | * @q: the request queue. | |
290 | */ | |
291 | static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, | |
292 | struct io_context *ioc, | |
293 | struct request_queue *q) | |
e21b7a0b | 294 | { |
ea25da48 PV |
295 | if (ioc) { |
296 | unsigned long flags; | |
297 | struct bfq_io_cq *icq; | |
aee69d78 | 298 | |
ea25da48 PV |
299 | spin_lock_irqsave(q->queue_lock, flags); |
300 | icq = icq_to_bic(ioc_lookup_icq(ioc, q)); | |
301 | spin_unlock_irqrestore(q->queue_lock, flags); | |
aee69d78 | 302 | |
ea25da48 | 303 | return icq; |
e21b7a0b | 304 | } |
e21b7a0b | 305 | |
ea25da48 | 306 | return NULL; |
aee69d78 PV |
307 | } |
308 | ||
ea25da48 PV |
309 | /* |
310 | * Schedule a run of the queue if there are requests pending and no |
311 | * one in the driver will restart queueing. |
312 | */ | |
313 | void bfq_schedule_dispatch(struct bfq_data *bfqd) | |
aee69d78 | 314 | { |
ea25da48 PV |
315 | if (bfqd->queued != 0) { |
316 | bfq_log(bfqd, "schedule dispatch"); | |
317 | blk_mq_run_hw_queues(bfqd->queue, true); | |
e21b7a0b | 318 | } |
aee69d78 PV |
319 | } |
320 | ||
321 | #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) | |
322 | #define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) | |
323 | ||
324 | #define bfq_sample_valid(samples) ((samples) > 80) | |
325 | ||
aee69d78 PV |
326 | /* |
327 | * Lifted from AS - choose which of rq1 and rq2 is best served now. |
328 | * We choose the request that is closest to the head right now. Distance |
329 | * behind the head is penalized and only allowed to a certain extent. | |
330 | */ | |
331 | static struct request *bfq_choose_req(struct bfq_data *bfqd, | |
332 | struct request *rq1, | |
333 | struct request *rq2, | |
334 | sector_t last) | |
335 | { | |
336 | sector_t s1, s2, d1 = 0, d2 = 0; | |
337 | unsigned long back_max; | |
338 | #define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ | |
339 | #define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ | |
340 | unsigned int wrap = 0; /* bit mask: requests behind the disk head? */ | |
341 | ||
342 | if (!rq1 || rq1 == rq2) | |
343 | return rq2; | |
344 | if (!rq2) | |
345 | return rq1; | |
346 | ||
347 | if (rq_is_sync(rq1) && !rq_is_sync(rq2)) | |
348 | return rq1; | |
349 | else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) | |
350 | return rq2; | |
351 | if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) | |
352 | return rq1; | |
353 | else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) | |
354 | return rq2; | |
355 | ||
356 | s1 = blk_rq_pos(rq1); | |
357 | s2 = blk_rq_pos(rq2); | |
358 | ||
359 | /* | |
360 | * By definition, 1KiB is 2 sectors. | |
361 | */ | |
362 | back_max = bfqd->bfq_back_max * 2; | |
363 | ||
364 | /* | |
365 | * Strict one way elevator _except_ in the case where we allow | |
366 | * short backward seeks which are biased as twice the cost of a | |
367 | * similar forward seek. | |
368 | */ | |
369 | if (s1 >= last) | |
370 | d1 = s1 - last; | |
371 | else if (s1 + back_max >= last) | |
372 | d1 = (last - s1) * bfqd->bfq_back_penalty; | |
373 | else | |
374 | wrap |= BFQ_RQ1_WRAP; | |
375 | ||
376 | if (s2 >= last) | |
377 | d2 = s2 - last; | |
378 | else if (s2 + back_max >= last) | |
379 | d2 = (last - s2) * bfqd->bfq_back_penalty; | |
380 | else | |
381 | wrap |= BFQ_RQ2_WRAP; | |
382 | ||
383 | /* Found required data */ | |
384 | ||
385 | /* | |
386 | * By doing switch() on the bit mask "wrap" we avoid having to | |
387 | * check two variables for all permutations: --> faster! | |
388 | */ | |
389 | switch (wrap) { | |
390 | case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ | |
391 | if (d1 < d2) | |
392 | return rq1; | |
393 | else if (d2 < d1) | |
394 | return rq2; | |
395 | ||
396 | if (s1 >= s2) | |
397 | return rq1; | |
398 | else | |
399 | return rq2; | |
400 | ||
401 | case BFQ_RQ2_WRAP: | |
402 | return rq1; | |
403 | case BFQ_RQ1_WRAP: | |
404 | return rq2; | |
405 | case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */ | |
406 | default: | |
407 | /* | |
408 | * Since both rqs are wrapped, | |
409 | * start with the one that's further behind head | |
410 | * (--> only *one* back seek required), | |
411 | * since back seek takes more time than forward. | |
412 | */ | |
413 | if (s1 <= s2) | |
414 | return rq1; | |
415 | else | |
416 | return rq2; | |
417 | } | |
418 | } | |
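/*
 * Worked example (editor's illustration) for the logic above, assuming
 * both requests are of the same sync and meta class: with the head at
 * sector 1000, bfqd->bfq_back_max set to 16 KiB (so back_max = 32
 * sectors) and bfq_back_penalty = 2, a request rq1 at sector 1100 gets
 * d1 = 100, while a request rq2 at sector 980, lying within back_max
 * behind the head, gets d2 = (1000 - 980) * 2 = 40. Since neither
 * request wraps and d2 < d1, rq2 is chosen despite requiring a
 * backward seek.
 */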
419 | ||
a52a69ea PV |
420 | /* |
421 | * See the comments on bfq_limit_depth for the purpose of | |
422 | * the depths set in the function. | |
423 | */ | |
424 | static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt) | |
425 | { | |
426 | bfqd->sb_shift = bt->sb.shift; | |
427 | ||
428 | /* | |
429 | * In-word depths if no bfq_queue is being weight-raised: | |
430 | * leaving 25% of tags only for sync reads. | |
431 | * | |
432 | * In next formulas, right-shift the value | |
433 | * (1U<<bfqd->sb_shift), instead of computing directly | |
434 | * (1U<<(bfqd->sb_shift - something)), to be robust against | |
435 | * any possible value of bfqd->sb_shift, without having to | |
436 | * limit 'something'. | |
437 | */ | |
438 | /* no more than 50% of tags for async I/O */ | |
439 | bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U); | |
440 | /* | |
441 | * no more than 75% of tags for sync writes (25% extra tags | |
442 | * w.r.t. async I/O, to prevent async I/O from starving sync | |
443 | * writes) | |
444 | */ | |
445 | bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U); | |
446 | ||
447 | /* | |
448 | * In-word depths in case some bfq_queue is being weight- | |
449 | * raised: leaving ~63% of tags for sync reads. This is the | |
450 | * highest percentage for which, in our tests, application | |
451 | * start-up times didn't suffer from any regression due to tag | |
452 | * shortage. | |
453 | */ | |
454 | /* no more than ~18% of tags for async I/O */ | |
455 | bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U); | |
456 | /* no more than ~37% of tags for sync writes (~20% extra tags) */ | |
457 | bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U); | |
458 | } | |
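/*
 * Worked example (editor's illustration): with sb_shift == 6, i.e., 64
 * tags per sbitmap word, the formulas above yield:
 *
 *	word_depths[0][0] = 32	(async I/O, no queue weight-raised)
 *	word_depths[0][1] = 48	(sync writes, no queue weight-raised)
 *	word_depths[1][0] = 12	(async I/O, some queue weight-raised)
 *	word_depths[1][1] = 24	(sync writes, some queue weight-raised)
 *
 * so 16 or 40 tags, respectively, always remain available to sync
 * reads.
 */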
459 | ||
460 | /* | |
461 | * Async I/O can easily starve sync I/O (both sync reads and sync | |
462 | * writes), by consuming all tags. Similarly, storms of sync writes, | |
463 | * such as those that sync(2) may trigger, can starve sync reads. | |
464 | * Limit depths of async I/O and sync writes so as to counter both | |
465 | * problems. | |
466 | */ | |
467 | static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) | |
468 | { | |
469 | struct blk_mq_tags *tags = blk_mq_tags_from_data(data); | |
470 | struct bfq_data *bfqd = data->q->elevator->elevator_data; | |
471 | struct sbitmap_queue *bt; | |
472 | ||
473 | if (op_is_sync(op) && !op_is_write(op)) | |
474 | return; | |
475 | ||
476 | if (data->flags & BLK_MQ_REQ_RESERVED) { | |
477 | if (unlikely(!tags->nr_reserved_tags)) { | |
478 | WARN_ON_ONCE(1); | |
479 | return; | |
480 | } | |
481 | bt = &tags->breserved_tags; | |
482 | } else | |
483 | bt = &tags->bitmap_tags; | |
484 | ||
485 | if (unlikely(bfqd->sb_shift != bt->sb.shift)) | |
486 | bfq_update_depths(bfqd, bt); | |
487 | ||
488 | data->shallow_depth = | |
489 | bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)]; | |
490 | ||
491 | bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u", | |
492 | __func__, bfqd->wr_busy_queues, op_is_sync(op), | |
493 | data->shallow_depth); | |
494 | } | |
495 | ||
36eca894 AA |
496 | static struct bfq_queue * |
497 | bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, | |
498 | sector_t sector, struct rb_node **ret_parent, | |
499 | struct rb_node ***rb_link) | |
500 | { | |
501 | struct rb_node **p, *parent; | |
502 | struct bfq_queue *bfqq = NULL; | |
503 | ||
504 | parent = NULL; | |
505 | p = &root->rb_node; | |
506 | while (*p) { | |
507 | struct rb_node **n; | |
508 | ||
509 | parent = *p; | |
510 | bfqq = rb_entry(parent, struct bfq_queue, pos_node); | |
511 | ||
512 | /* | |
513 | * Sort strictly based on sector. Smallest to the left, | |
514 | * largest to the right. | |
515 | */ | |
516 | if (sector > blk_rq_pos(bfqq->next_rq)) | |
517 | n = &(*p)->rb_right; | |
518 | else if (sector < blk_rq_pos(bfqq->next_rq)) | |
519 | n = &(*p)->rb_left; | |
520 | else | |
521 | break; | |
522 | p = n; | |
523 | bfqq = NULL; | |
524 | } | |
525 | ||
526 | *ret_parent = parent; | |
527 | if (rb_link) | |
528 | *rb_link = p; | |
529 | ||
530 | bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", | |
531 | (unsigned long long)sector, | |
532 | bfqq ? bfqq->pid : 0); | |
533 | ||
534 | return bfqq; | |
535 | } | |
536 | ||
7b8fa3b9 PV |
537 | static bool bfq_too_late_for_merging(struct bfq_queue *bfqq) |
538 | { | |
539 | return bfqq->service_from_backlogged > 0 && | |
540 | time_is_before_jiffies(bfqq->first_IO_time + | |
541 | bfq_merge_time_limit); | |
542 | } | |
543 | ||
ea25da48 | 544 | void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
36eca894 AA |
545 | { |
546 | struct rb_node **p, *parent; | |
547 | struct bfq_queue *__bfqq; | |
548 | ||
549 | if (bfqq->pos_root) { | |
550 | rb_erase(&bfqq->pos_node, bfqq->pos_root); | |
551 | bfqq->pos_root = NULL; | |
552 | } | |
553 | ||
7b8fa3b9 PV |
554 | /* |
555 | * bfqq cannot be merged any longer (see comments in | |
556 | * bfq_setup_cooperator): no point in adding bfqq into the | |
557 | * position tree. | |
558 | */ | |
559 | if (bfq_too_late_for_merging(bfqq)) | |
560 | return; | |
561 | ||
36eca894 AA |
562 | if (bfq_class_idle(bfqq)) |
563 | return; | |
564 | if (!bfqq->next_rq) | |
565 | return; | |
566 | ||
567 | bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; | |
568 | __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, | |
569 | blk_rq_pos(bfqq->next_rq), &parent, &p); | |
570 | if (!__bfqq) { | |
571 | rb_link_node(&bfqq->pos_node, parent, p); | |
572 | rb_insert_color(&bfqq->pos_node, bfqq->pos_root); | |
573 | } else | |
574 | bfqq->pos_root = NULL; | |
575 | } | |
576 | ||
1de0c4cd AA |
577 | /* |
578 | * Tell whether there are active queues or groups with differentiated weights. | |
579 | */ | |
580 | static bool bfq_differentiated_weights(struct bfq_data *bfqd) | |
581 | { | |
582 | /* | |
583 | * For weights to differ, at least one of the trees must contain | |
584 | * at least two nodes. | |
585 | */ | |
586 | return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && | |
587 | (bfqd->queue_weights_tree.rb_node->rb_left || | |
588 | bfqd->queue_weights_tree.rb_node->rb_right) | |
589 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | |
590 | ) || | |
591 | (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && | |
592 | (bfqd->group_weights_tree.rb_node->rb_left || | |
593 | bfqd->group_weights_tree.rb_node->rb_right) | |
594 | #endif | |
595 | ); | |
596 | } | |
597 | ||
598 | /* | |
599 | * The following function returns true if every queue must receive the | |
600 | * same share of the throughput (this condition is used when deciding | |
601 | * whether idling may be disabled, see the comments in the function | |
602 | * bfq_bfqq_may_idle()). | |
603 | * | |
604 | * Such a scenario occurs when: | |
605 | * 1) all active queues have the same weight, | |
606 | * 2) all active groups at the same level in the groups tree have the same | |
607 | * weight, | |
608 | * 3) all active groups at the same level in the groups tree have the same | |
609 | * number of children. | |
610 | * | |
611 | * Unfortunately, keeping the necessary state for evaluating exactly the | |
612 | * above symmetry conditions would be quite complex and time-consuming. | |
613 | * Therefore this function evaluates, instead, the following stronger | |
614 | * sub-conditions, for which it is much easier to maintain the needed | |
615 | * state: | |
616 | * 1) all active queues have the same weight, | |
617 | * 2) all active groups have the same weight, | |
618 | * 3) all active groups have at most one active child each. | |
619 | * In particular, the last two conditions are always true if hierarchical | |
620 | * support and the cgroups interface are not enabled, thus no state needs | |
621 | * to be maintained in this case. | |
622 | */ | |
623 | static bool bfq_symmetric_scenario(struct bfq_data *bfqd) | |
624 | { | |
625 | return !bfq_differentiated_weights(bfqd); | |
626 | } | |
627 | ||
628 | /* | |
629 | * If the weight-counter tree passed as input contains no counter for | |
630 | * the weight of the input entity, then add that counter; otherwise just | |
631 | * increment the existing counter. | |
632 | * | |
633 | * Note that weight-counter trees contain few nodes in mostly symmetric | |
634 | * scenarios. For example, if all queues have the same weight, then the | |
635 | * weight-counter tree for the queues may contain at most one node. | |
636 | * This holds even if low_latency is on, because weight-raised queues | |
637 | * are not inserted in the tree. | |
638 | * In most scenarios, the rate at which nodes are created/destroyed | |
639 | * should be low too. | |
640 | */ | |
ea25da48 PV |
641 | void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, |
642 | struct rb_root *root) | |
1de0c4cd AA |
643 | { |
644 | struct rb_node **new = &(root->rb_node), *parent = NULL; | |
645 | ||
646 | /* | |
647 | * Do not insert if the entity is already associated with a | |
648 | * counter, which happens if: | |
649 | * 1) the entity is associated with a queue, | |
650 | * 2) a request arrival has caused the queue to become both | |
651 | * non-weight-raised, and hence change its weight, and | |
652 | * backlogged; in this respect, each of the two events | |
653 | * causes an invocation of this function, | |
654 | * 3) this is the invocation of this function caused by the | |
655 | * second event. This second invocation is actually useless, | |
656 | * and we handle this fact by exiting immediately. More | |
657 | * efficient or clearer solutions might possibly be adopted. | |
658 | */ | |
659 | if (entity->weight_counter) | |
660 | return; | |
661 | ||
662 | while (*new) { | |
663 | struct bfq_weight_counter *__counter = container_of(*new, | |
664 | struct bfq_weight_counter, | |
665 | weights_node); | |
666 | parent = *new; | |
667 | ||
668 | if (entity->weight == __counter->weight) { | |
669 | entity->weight_counter = __counter; | |
670 | goto inc_counter; | |
671 | } | |
672 | if (entity->weight < __counter->weight) | |
673 | new = &((*new)->rb_left); | |
674 | else | |
675 | new = &((*new)->rb_right); | |
676 | } | |
677 | ||
678 | entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), | |
679 | GFP_ATOMIC); | |
680 | ||
681 | /* | |
682 | * In the unlucky event of an allocation failure, we just | |
683 | * exit. This will cause the weight of entity to not be | |
684 | * considered in bfq_differentiated_weights, which, in turn, |
685 | * causes the scenario to be wrongly deemed symmetric in case |
686 | * entity's weight would have been the only weight making the |
687 | * scenario asymmetric. On the bright side, however, no |
688 | * imbalance will occur when entity becomes inactive again (the |
689 | * invocation of this function is triggered by an activation | |
690 | * of entity). In fact, bfq_weights_tree_remove does nothing | |
691 | * if !entity->weight_counter. | |
692 | */ | |
693 | if (unlikely(!entity->weight_counter)) | |
694 | return; | |
695 | ||
696 | entity->weight_counter->weight = entity->weight; | |
697 | rb_link_node(&entity->weight_counter->weights_node, parent, new); | |
698 | rb_insert_color(&entity->weight_counter->weights_node, root); | |
699 | ||
700 | inc_counter: | |
701 | entity->weight_counter->num_active++; | |
702 | } | |
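/*
 * Usage illustration (editor's sketch): if three queues, all with
 * weight 100, become active, the tree holds a single counter node with
 * weight == 100 and num_active == 3, and the scenario is still deemed
 * symmetric. Activating a fourth queue with weight 200 creates a
 * second node, making bfq_differentiated_weights() true and thus
 * preventing idling from being disabled on symmetry grounds.
 */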
703 | ||
704 | /* | |
705 | * Decrement the weight counter associated with the entity, and, if the | |
706 | * counter reaches 0, remove the counter from the tree. | |
707 | * See the comments to the function bfq_weights_tree_add() for considerations | |
708 | * about overhead. | |
709 | */ | |
ea25da48 PV |
710 | void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity, |
711 | struct rb_root *root) | |
1de0c4cd AA |
712 | { |
713 | if (!entity->weight_counter) | |
714 | return; | |
715 | ||
716 | entity->weight_counter->num_active--; | |
717 | if (entity->weight_counter->num_active > 0) | |
718 | goto reset_entity_pointer; | |
719 | ||
720 | rb_erase(&entity->weight_counter->weights_node, root); | |
721 | kfree(entity->weight_counter); | |
722 | ||
723 | reset_entity_pointer: | |
724 | entity->weight_counter = NULL; | |
725 | } | |
726 | ||
aee69d78 PV |
727 | /* |
728 | * Return expired entry, or NULL to just start from scratch in rbtree. | |
729 | */ | |
730 | static struct request *bfq_check_fifo(struct bfq_queue *bfqq, | |
731 | struct request *last) | |
732 | { | |
733 | struct request *rq; | |
734 | ||
735 | if (bfq_bfqq_fifo_expire(bfqq)) | |
736 | return NULL; | |
737 | ||
738 | bfq_mark_bfqq_fifo_expire(bfqq); | |
739 | ||
740 | rq = rq_entry_fifo(bfqq->fifo.next); | |
741 | ||
742 | if (rq == last || ktime_get_ns() < rq->fifo_time) | |
743 | return NULL; | |
744 | ||
745 | bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); | |
746 | return rq; | |
747 | } | |
748 | ||
749 | static struct request *bfq_find_next_rq(struct bfq_data *bfqd, | |
750 | struct bfq_queue *bfqq, | |
751 | struct request *last) | |
752 | { | |
753 | struct rb_node *rbnext = rb_next(&last->rb_node); | |
754 | struct rb_node *rbprev = rb_prev(&last->rb_node); | |
755 | struct request *next, *prev = NULL; | |
756 | ||
757 | /* Follow expired path, else get first next available. */ | |
758 | next = bfq_check_fifo(bfqq, last); | |
759 | if (next) | |
760 | return next; | |
761 | ||
762 | if (rbprev) | |
763 | prev = rb_entry_rq(rbprev); | |
764 | ||
765 | if (rbnext) | |
766 | next = rb_entry_rq(rbnext); | |
767 | else { | |
768 | rbnext = rb_first(&bfqq->sort_list); | |
769 | if (rbnext && rbnext != &last->rb_node) | |
770 | next = rb_entry_rq(rbnext); | |
771 | } | |
772 | ||
773 | return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); | |
774 | } | |
775 | ||
c074170e | 776 | /* see the definition of bfq_async_charge_factor for details */ |
aee69d78 PV |
777 | static unsigned long bfq_serv_to_charge(struct request *rq, |
778 | struct bfq_queue *bfqq) | |
779 | { | |
44e44a1b | 780 | if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1) |
c074170e PV |
781 | return blk_rq_sectors(rq); |
782 | ||
cfd69712 PV |
783 | /* |
784 | * If there are no weight-raised queues, then amplify service | |
785 | * by just the async charge factor; otherwise amplify service | |
786 | * by twice the async charge factor, to further reduce latency | |
787 | * for weight-raised queues. | |
788 | */ | |
789 | if (bfqq->bfqd->wr_busy_queues == 0) | |
790 | return blk_rq_sectors(rq) * bfq_async_charge_factor; | |
791 | ||
792 | return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor; | |
aee69d78 PV |
793 | } |
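/*
 * Worked example (editor's illustration): with bfq_async_charge_factor
 * == 10, an async request of 8 sectors is charged 80 sectors when no
 * queue is weight-raised, and 160 sectors otherwise, whereas a sync
 * request of 8 sectors is always charged exactly 8 sectors.
 */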
794 | ||
795 | /** | |
796 | * bfq_updated_next_req - update the queue after a new next_rq selection. | |
797 | * @bfqd: the device data the queue belongs to. | |
798 | * @bfqq: the queue to update. | |
799 | * | |
800 | * If the first request of a queue changes we make sure that the queue | |
801 | * has enough budget to serve at least its first request (if the | |
802 | * request has grown). We do this because if the queue does not have |
803 | * enough budget for its first request, it has to go through two |
804 | * dispatch rounds to actually get it dispatched. |
805 | */ | |
806 | static void bfq_updated_next_req(struct bfq_data *bfqd, | |
807 | struct bfq_queue *bfqq) | |
808 | { | |
809 | struct bfq_entity *entity = &bfqq->entity; | |
810 | struct request *next_rq = bfqq->next_rq; | |
811 | unsigned long new_budget; | |
812 | ||
813 | if (!next_rq) | |
814 | return; | |
815 | ||
816 | if (bfqq == bfqd->in_service_queue) | |
817 | /* | |
818 | * In order not to break guarantees, budgets cannot be | |
819 | * changed after an entity has been selected. | |
820 | */ | |
821 | return; | |
822 | ||
823 | new_budget = max_t(unsigned long, bfqq->max_budget, | |
824 | bfq_serv_to_charge(next_rq, bfqq)); | |
825 | if (entity->budget != new_budget) { | |
826 | entity->budget = new_budget; | |
827 | bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", | |
828 | new_budget); | |
80294c3b | 829 | bfq_requeue_bfqq(bfqd, bfqq, false); |
aee69d78 PV |
830 | } |
831 | } | |
832 | ||
3e2bdd6d PV |
833 | static unsigned int bfq_wr_duration(struct bfq_data *bfqd) |
834 | { | |
835 | u64 dur; | |
836 | ||
837 | if (bfqd->bfq_wr_max_time > 0) | |
838 | return bfqd->bfq_wr_max_time; | |
839 | ||
840 | dur = bfqd->RT_prod; | |
841 | do_div(dur, bfqd->peak_rate); | |
842 | ||
843 | /* | |
844 | * Limit duration between 3 and 13 seconds. Tests show that | |
845 | * values higher than 13 seconds often yield the opposite of |
846 | * the desired result, i.e., worsen responsiveness by letting |
847 | * non-interactive and non-soft-real-time applications |
848 | * preserve weight raising for too long a time interval. |
849 | * |
850 | * On the other end, values lower than 3 seconds make it |
851 | * difficult for most interactive tasks to complete their jobs | |
852 | * before weight-raising finishes. | |
853 | */ | |
854 | if (dur > msecs_to_jiffies(13000)) | |
855 | dur = msecs_to_jiffies(13000); | |
856 | else if (dur < msecs_to_jiffies(3000)) | |
857 | dur = msecs_to_jiffies(3000); | |
858 | ||
859 | return dur; | |
860 | } | |
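/*
 * Worked example (editor's illustration) for bfq_wr_duration() above:
 * if RT_prod / peak_rate evaluates to the equivalent of 20 seconds,
 * the duration is clamped down to msecs_to_jiffies(13000); if it
 * evaluates to 1 second, it is raised to msecs_to_jiffies(3000).
 */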
861 | ||
862 | /* switch back from soft real-time to interactive weight raising */ | |
863 | static void switch_back_to_interactive_wr(struct bfq_queue *bfqq, | |
864 | struct bfq_data *bfqd) | |
865 | { | |
866 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; | |
867 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); | |
868 | bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; | |
869 | } | |
870 | ||
36eca894 | 871 | static void |
13c931bd PV |
872 | bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, |
873 | struct bfq_io_cq *bic, bool bfq_already_existing) | |
36eca894 | 874 | { |
13c931bd PV |
875 | unsigned int old_wr_coeff = bfqq->wr_coeff; |
876 | bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); | |
877 | ||
d5be3fef PV |
878 | if (bic->saved_has_short_ttime) |
879 | bfq_mark_bfqq_has_short_ttime(bfqq); | |
36eca894 | 880 | else |
d5be3fef | 881 | bfq_clear_bfqq_has_short_ttime(bfqq); |
36eca894 AA |
882 | |
883 | if (bic->saved_IO_bound) | |
884 | bfq_mark_bfqq_IO_bound(bfqq); | |
885 | else | |
886 | bfq_clear_bfqq_IO_bound(bfqq); | |
887 | ||
888 | bfqq->ttime = bic->saved_ttime; | |
889 | bfqq->wr_coeff = bic->saved_wr_coeff; | |
890 | bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; | |
891 | bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; | |
892 | bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; | |
893 | ||
e1b2324d | 894 | if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || |
36eca894 | 895 | time_is_before_jiffies(bfqq->last_wr_start_finish + |
e1b2324d | 896 | bfqq->wr_cur_max_time))) { |
3e2bdd6d PV |
897 | if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && |
898 | !bfq_bfqq_in_large_burst(bfqq) && | |
899 | time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + | |
900 | bfq_wr_duration(bfqd))) { | |
901 | switch_back_to_interactive_wr(bfqq, bfqd); | |
902 | } else { | |
903 | bfqq->wr_coeff = 1; | |
904 | bfq_log_bfqq(bfqq->bfqd, bfqq, | |
905 | "resume state: switching off wr"); | |
906 | } | |
36eca894 AA |
907 | } |
908 | ||
909 | /* make sure weight will be updated, however we got here */ | |
910 | bfqq->entity.prio_changed = 1; | |
13c931bd PV |
911 | |
912 | if (likely(!busy)) | |
913 | return; | |
914 | ||
915 | if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) | |
916 | bfqd->wr_busy_queues++; | |
917 | else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) | |
918 | bfqd->wr_busy_queues--; | |
36eca894 AA |
919 | } |
920 | ||
921 | static int bfqq_process_refs(struct bfq_queue *bfqq) | |
922 | { | |
923 | return bfqq->ref - bfqq->allocated - bfqq->entity.on_st; | |
924 | } | |
925 | ||
e1b2324d AA |
926 | /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */ |
927 | static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) | |
928 | { | |
929 | struct bfq_queue *item; | |
930 | struct hlist_node *n; | |
931 | ||
932 | hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) | |
933 | hlist_del_init(&item->burst_list_node); | |
934 | hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); | |
935 | bfqd->burst_size = 1; | |
936 | bfqd->burst_parent_entity = bfqq->entity.parent; | |
937 | } | |
938 | ||
939 | /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ | |
940 | static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) | |
941 | { | |
942 | /* Increment burst size to take bfqq into account too */ |
943 | bfqd->burst_size++; | |
944 | ||
945 | if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { | |
946 | struct bfq_queue *pos, *bfqq_item; | |
947 | struct hlist_node *n; | |
948 | ||
949 | /* | |
950 | * Enough queues have been activated shortly after each | |
951 | * other to consider this burst as large. | |
952 | */ | |
953 | bfqd->large_burst = true; | |
954 | ||
955 | /* | |
956 | * We can now mark all queues in the burst list as | |
957 | * belonging to a large burst. | |
958 | */ | |
959 | hlist_for_each_entry(bfqq_item, &bfqd->burst_list, | |
960 | burst_list_node) | |
961 | bfq_mark_bfqq_in_large_burst(bfqq_item); | |
962 | bfq_mark_bfqq_in_large_burst(bfqq); | |
963 | ||
964 | /* | |
965 | * From now on, and until the current burst finishes, any | |
966 | * new queue being activated shortly after the last queue | |
967 | * was inserted in the burst can be immediately marked as | |
968 | * belonging to a large burst. So the burst list is not | |
969 | * needed any more. Remove it. | |
970 | */ | |
971 | hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, | |
972 | burst_list_node) | |
973 | hlist_del_init(&pos->burst_list_node); | |
974 | } else /* | |
975 | * Burst not yet large: add bfqq to the burst list. Do | |
976 | * not increment the ref counter for bfqq, because bfqq | |
977 | * is removed from the burst list before freeing bfqq | |
978 | * in put_queue. | |
979 | */ | |
980 | hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); | |
981 | } | |
982 | ||
983 | /* | |
984 | * If many queues belonging to the same group happen to be created | |
985 | * shortly after each other, then the processes associated with these | |
986 | * queues have typically a common goal. In particular, bursts of queue | |
987 | * creations are usually caused by services or applications that spawn | |
988 | * many parallel threads/processes. Examples are systemd during boot, | |
989 | * or git grep. To help these processes get their job done as soon as | |
990 | * possible, it is usually better to not grant either weight-raising | |
991 | * or device idling to their queues. | |
992 | * | |
993 | * In this comment we describe, firstly, the reasons why this fact | |
994 | * holds, and, secondly, the next function, which implements the main | |
995 | * steps needed to properly mark these queues so that they can then be | |
996 | * treated in a different way. | |
997 | * | |
998 | * The above services or applications benefit mostly from a high | |
999 | * throughput: the quicker the requests of the activated queues are | |
1000 | * cumulatively served, the sooner the target job of these queues gets | |
1001 | * completed. As a consequence, weight-raising any of these queues, | |
1002 | * which also implies idling the device for it, is almost always | |
1003 | * counterproductive. In most cases it just lowers throughput. | |
1004 | * | |
1005 | * On the other hand, a burst of queue creations may be caused also by | |
1006 | * the start of an application that does not consist of a lot of | |
1007 | * parallel I/O-bound threads. In fact, with a complex application, | |
1008 | * several short processes may need to be executed to start up the |
1009 | * application. In this respect, to start an application as quickly as | |
1010 | * possible, the best thing to do is in any case to privilege the I/O | |
1011 | * related to the application with respect to all other | |
1012 | * I/O. Therefore, the best strategy to start as quickly as possible | |
1013 | * an application that causes a burst of queue creations is to | |
1014 | * weight-raise all the queues created during the burst. This is the | |
1015 | * exact opposite of the best strategy for the other type of bursts. | |
1016 | * | |
1017 | * In the end, to take the best action for each of the two cases, the | |
1018 | * two types of bursts need to be distinguished. Fortunately, this | |
1019 | * seems relatively easy, by looking at the sizes of the bursts. In | |
1020 | * particular, we found a threshold such that only bursts with a | |
1021 | * larger size than that threshold are apparently caused by | |
1022 | * services or commands such as systemd or git grep. For brevity, | |
1023 | * hereafter we call just 'large' these bursts. BFQ *does not* | |
1024 | * weight-raise queues whose creation occurs in a large burst. In | |
1025 | * addition, for each of these queues BFQ performs or does not perform | |
1026 | * idling depending on which choice boosts the throughput more. The | |
1027 | * exact choice depends on the device and request pattern at | |
1028 | * hand. | |
1029 | * | |
1030 | * Unfortunately, false positives may occur while an interactive task | |
1031 | * is starting (e.g., an application is being started). The | |
1032 | * consequence is that the queues associated with the task do not | |
1033 | * enjoy weight raising as expected. Fortunately these false positives | |
1034 | * are very rare. They typically occur if some service happens to | |
1035 | * start doing I/O exactly when the interactive task starts. | |
1036 | * | |
1037 | * Turning back to the next function, it implements all the steps | |
1038 | * needed to detect the occurrence of a large burst and to properly | |
1039 | * mark all the queues belonging to it (so that they can then be | |
1040 | * treated in a different way). This goal is achieved by maintaining a | |
1041 | * "burst list" that holds, temporarily, the queues that belong to the | |
1042 | * burst in progress. The list is then used to mark these queues as | |
1043 | * belonging to a large burst if the burst does become large. The main | |
1044 | * steps are the following. | |
1045 | * | |
1046 | * . when the very first queue is created, the queue is inserted into the | |
1047 | * list (as it could be the first queue in a possible burst) | |
1048 | * | |
1049 | * . if the current burst has not yet become large, and a queue Q that does | |
1050 | * not yet belong to the burst is activated shortly after the last time | |
1051 | * at which a new queue entered the burst list, then the function appends | |
1052 | * Q to the burst list | |
1053 | * | |
1054 | * . if, as a consequence of the previous step, the burst size reaches | |
1055 | * the large-burst threshold, then | |
1056 | * | |
1057 | * . all the queues in the burst list are marked as belonging to a | |
1058 | * large burst | |
1059 | * | |
1060 | * . the burst list is deleted; in fact, the burst list already served | |
1061 | * its purpose (keeping temporarily track of the queues in a burst, | |
1062 | * so as to be able to mark them as belonging to a large burst in the | |
1063 | * previous sub-step), and now is not needed any more | |
1064 | * | |
1065 | * . the device enters a large-burst mode | |
1066 | * | |
1067 | * . if a queue Q that does not belong to the burst is created while | |
1068 | * the device is in large-burst mode and shortly after the last time | |
1069 | * at which a queue either entered the burst list or was marked as | |
1070 | * belonging to the current large burst, then Q is immediately marked | |
1071 | * as belonging to a large burst. | |
1072 | * | |
1073 | * . if a queue Q that does not belong to the burst is created a while | |
1074 | * later, i.e., not shortly after, the last time at which a queue |
1075 | * either entered the burst list or was marked as belonging to the | |
1076 | * current large burst, then the current burst is deemed as finished and: | |
1077 | * | |
1078 | * . the large-burst mode is reset if set | |
1079 | * | |
1080 | * . the burst list is emptied | |
1081 | * | |
1082 | * . Q is inserted in the burst list, as Q may be the first queue | |
1083 | * in a possible new burst (then the burst list contains just Q | |
1084 | * after this step). | |
1085 | */ | |
1086 | static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) | |
1087 | { | |
1088 | /* | |
1089 | * If bfqq is already in the burst list or is part of a large | |
1090 | * burst, or finally has just been split, then there is | |
1091 | * nothing else to do. | |
1092 | */ | |
1093 | if (!hlist_unhashed(&bfqq->burst_list_node) || | |
1094 | bfq_bfqq_in_large_burst(bfqq) || | |
1095 | time_is_after_eq_jiffies(bfqq->split_time + | |
1096 | msecs_to_jiffies(10))) | |
1097 | return; | |
1098 | ||
1099 | /* | |
1100 | * If bfqq's creation happens late enough, or bfqq belongs to | |
1101 | * a different group than the burst group, then the current | |
1102 | * burst is finished, and related data structures must be | |
1103 | * reset. | |
1104 | * | |
1105 | * In this respect, consider the special case where bfqq is | |
1106 | * the very first queue created after BFQ is selected for this | |
1107 | * device. In this case, last_ins_in_burst and | |
1108 | * burst_parent_entity are not yet significant when we get | |
1109 | * here. But it is easy to verify that, whether or not the | |
1110 | * following condition is true, bfqq will end up being | |
1111 | * inserted into the burst list. In particular the list will | |
1112 | * happen to contain only bfqq. And this is exactly what has | |
1113 | * to happen, as bfqq may be the first queue of the first | |
1114 | * burst. | |
1115 | */ | |
1116 | if (time_is_before_jiffies(bfqd->last_ins_in_burst + | |
1117 | bfqd->bfq_burst_interval) || | |
1118 | bfqq->entity.parent != bfqd->burst_parent_entity) { | |
1119 | bfqd->large_burst = false; | |
1120 | bfq_reset_burst_list(bfqd, bfqq); | |
1121 | goto end; | |
1122 | } | |
1123 | ||
1124 | /* | |
1125 | * If we get here, then bfqq is being activated shortly after the | |
1126 | * last queue. So, if the current burst is also large, we can mark | |
1127 | * bfqq as belonging to this large burst immediately. | |
1128 | */ | |
1129 | if (bfqd->large_burst) { | |
1130 | bfq_mark_bfqq_in_large_burst(bfqq); | |
1131 | goto end; | |
1132 | } | |
1133 | ||
1134 | /* | |
1135 | * If we get here, then a large-burst state has not yet been | |
1136 | * reached, but bfqq is being activated shortly after the last | |
1137 | * queue. Then we add bfqq to the burst. | |
1138 | */ | |
1139 | bfq_add_to_burst(bfqd, bfqq); | |
1140 | end: | |
1141 | /* | |
1142 | * At this point, bfqq either has been added to the current | |
1143 | * burst or has caused the current burst to terminate and a | |
1144 | * possible new burst to start. In particular, in the second | |
1145 | * case, bfqq has become the first queue in the possible new | |
1146 | * burst. In both cases last_ins_in_burst needs to be moved | |
1147 | * forward. | |
1148 | */ | |
1149 | bfqd->last_ins_in_burst = jiffies; | |
1150 | } | |
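/*
 * Walkthrough of bfq_handle_burst() above (editor's illustration, with
 * a hypothetical large-burst threshold of 8): a service spawns ten
 * I/O-bound processes in rapid succession during boot. The first seven
 * queues are appended to the burst list; when the eighth is activated,
 * the threshold is reached, all eight queues are marked
 * in_large_burst, the list is emptied and large_burst mode is set. The
 * ninth and tenth queues, created shortly after, are then marked
 * in_large_burst immediately, so none of the ten queues gets
 * weight-raised.
 */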
1151 | ||
aee69d78 PV |
1152 | static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) |
1153 | { | |
1154 | struct bfq_entity *entity = &bfqq->entity; | |
1155 | ||
1156 | return entity->budget - entity->service; | |
1157 | } | |
1158 | ||
1159 | /* | |
1160 | * If enough samples have been computed, return the current max budget | |
1161 | * stored in bfqd, which is dynamically updated according to the | |
1162 | * estimated disk peak rate; otherwise return the default max budget | |
1163 | */ | |
1164 | static int bfq_max_budget(struct bfq_data *bfqd) | |
1165 | { | |
1166 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) | |
1167 | return bfq_default_max_budget; | |
1168 | else | |
1169 | return bfqd->bfq_max_budget; | |
1170 | } | |
1171 | ||
1172 | /* | |
1173 | * Return min budget, which is a fraction of the current or default | |
1174 | * max budget (trying with 1/32) | |
1175 | */ | |
1176 | static int bfq_min_budget(struct bfq_data *bfqd) | |
1177 | { | |
1178 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) | |
1179 | return bfq_default_max_budget / 32; | |
1180 | else | |
1181 | return bfqd->bfq_max_budget / 32; | |
1182 | } | |
1183 | ||
aee69d78 PV |
1184 | /* |
1185 | * The next function, invoked after the input queue bfqq switches from | |
1186 | * idle to busy, updates the budget of bfqq. The function also tells | |
1187 | * whether the in-service queue should be expired, by returning | |
1188 | * true. The purpose of expiring the in-service queue is to give bfqq | |
1189 | * the chance to possibly preempt the in-service queue, and the reason | |
44e44a1b PV |
1190 | * for preempting the in-service queue is to achieve one of the two |
1191 | * goals below. | |
aee69d78 | 1192 | * |
44e44a1b PV |
1193 | * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has |
1194 | * expired because it has remained idle. In particular, bfqq may have | |
1195 | * expired for one of the following two reasons: | |
aee69d78 PV |
1196 | * |
1197 | * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling | |
1198 | * and did not manage to issue a new request before its last |
1199 | * request was served; | |
1200 | * | |
1201 | * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue | |
1202 | * a new request before the expiration of the idling-time. | |
1203 | * | |
1204 | * Even if bfqq has expired for one of the above reasons, the process | |
1205 | * associated with the queue may be however issuing requests greedily, | |
1206 | * and thus be sensitive to the bandwidth it receives (bfqq may have | |
1207 | * remained idle for other reasons: CPU high load, bfqq not enjoying | |
1208 | * idling, I/O throttling somewhere in the path from the process to | |
1209 | * the I/O scheduler, ...). But if, after every expiration for one of | |
1210 | * the above two reasons, bfqq has to wait for the service of at least | |
1211 | * one full budget of another queue before being served again, then | |
1212 | * bfqq is likely to get a much lower bandwidth or resource time than | |
1213 | * its reserved ones. To address this issue, two countermeasures need | |
1214 | * to be taken. | |
1215 | * | |
1216 | * First, the budget and the timestamps of bfqq need to be updated in | |
1217 | * a special way on bfqq reactivation: they need to be updated as if | |
1218 | * bfqq did not remain idle and did not expire. In fact, if they are | |
1219 | * computed as if bfqq expired and remained idle until reactivation, | |
1220 | * then the process associated with bfqq is treated as if, instead of | |
1221 | * being greedy, it stopped issuing requests when bfqq remained idle, | |
1222 | * and restarts issuing requests only on this reactivation. In other | |
1223 | * words, the scheduler does not help the process recover the "service | |
1224 | * hole" between bfqq expiration and reactivation. As a consequence, | |
1225 | * the process receives a lower bandwidth than its reserved one. In | |
1226 | * contrast, to recover this hole, the budget must be updated as if | |
1227 | * bfqq was not expired at all before this reactivation, i.e., it must | |
1228 | * be set to the value of the remaining budget when bfqq was | |
1229 | * expired. Along the same line, timestamps need to be assigned the | |
1230 | * value they had the last time bfqq was selected for service, i.e., | |
1231 | * before last expiration. Thus timestamps need to be back-shifted | |
1232 | * with respect to their normal computation (see [1] for more details | |
1233 | * on this tricky aspect). | |
1234 | * | |
1235 | * Secondly, to allow the process to recover the hole, the in-service | |
1236 | * queue must be expired too, to give bfqq the chance to preempt it | |
1237 | * immediately. In fact, if bfqq has to wait for a full budget of the | |
1238 | * in-service queue to be completed, then it may become impossible to | |
1239 | * let the process recover the hole, even if the back-shifted | |
1240 | * timestamps of bfqq are lower than those of the in-service queue. If | |
1241 | * this happens for most or all of the holes, then the process may not | |
1242 | * receive its reserved bandwidth. In this respect, it is worth noting | |
1243 | * that, since the service of outstanding requests is unpreemptible, |
1244 | * a small fraction of the holes may however be unrecoverable, thereby |
1245 | * causing a small loss of bandwidth. |
1246 | * | |
1247 | * The last important point is detecting whether bfqq does need this | |
1248 | * bandwidth recovery. In this respect, the next function deems the | |
1249 | * process associated with bfqq greedy, and thus allows it to recover | |
1250 | * the hole, if: 1) the process is waiting for the arrival of a new | |
1251 | * request (which implies that bfqq expired for one of the above two | |
1252 | * reasons), and 2) such a request has arrived soon. The first | |
1253 | * condition is controlled through the flag non_blocking_wait_rq, | |
1254 | * while the second through the flag arrived_in_time. If both | |
1255 | * conditions hold, then the function computes the budget in the | |
1256 | * above-described special way, and signals that the in-service queue | |
1257 | * should be expired. Timestamp back-shifting is done later in | |
1258 | * __bfq_activate_entity. | |
44e44a1b PV |
1259 | * |
1260 | * 2. Reduce latency. Even if timestamps are not backshifted to let | |
1261 | * the process associated with bfqq recover a service hole, bfqq may | |
1262 | * however happen to have, after being (re)activated, a lower finish | |
1263 | * timestamp than the in-service queue. That is, the next budget of | |
1264 | * bfqq may have to be completed before the one of the in-service | |
1265 | * queue. If this is the case, then preempting the in-service queue | |
1266 | * allows this goal to be achieved, apart from the unpreemptible, | |
1267 | * outstanding requests mentioned above. | |
1268 | * | |
1269 | * Unfortunately, regardless of which of the above two goals one wants | |
1270 | * to achieve, service trees first need to be updated to know whether | |
1271 | * the in-service queue must be preempted. To have service trees | |
1272 | * correctly updated, the in-service queue must be expired and | |
1273 | * rescheduled, and bfqq must be scheduled too. This is one of the | |
1274 | * most costly operations (in future versions, the scheduling | |
1275 | * mechanism may be re-designed so as to make it possible to | |
1276 | * know whether preemption is needed without having to update service | |
1277 | * trees). In addition, queue preemptions almost always cause random | |
1278 | * I/O, and thus loss of throughput. Because of these facts, the next | |
1279 | * function adopts the following simple scheme to avoid both costly | |
1280 | * operations and too frequent preemptions: it requests the expiration | |
1281 | * of the in-service queue (unconditionally) only for queues that need | |
1282 | * to recover a hole, or that either are weight-raised or deserve to | |
1283 | * be weight-raised. | |
aee69d78 PV |
1284 | */ |
1285 | static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, | |
1286 | struct bfq_queue *bfqq, | |
44e44a1b PV |
1287 | bool arrived_in_time, |
1288 | bool wr_or_deserves_wr) | |
aee69d78 PV |
1289 | { |
1290 | struct bfq_entity *entity = &bfqq->entity; | |
1291 | ||
1292 | if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) { | |
1293 | /* | |
1294 | * We do not clear the flag non_blocking_wait_rq here, as | |
1295 | * the latter is used in bfq_activate_bfqq to signal | |
1296 | * that timestamps need to be back-shifted (and is | |
1297 | * cleared right after). | |
1298 | */ | |
1299 | ||
1300 | /* | |
1301 | * The next assignment relies on the fact that neither | |
1302 | * entity->service nor entity->budget is updated | |
1303 | * on expiration if bfqq is empty (see | |
1304 | * __bfq_bfqq_recalc_budget). Thus both quantities | |
1305 | * remain unchanged after such an expiration, and the | |
1306 | * following statement therefore assigns to | |
1307 | * entity->budget the remaining budget on such an | |
1308 | * expiration. For clarity, entity->service is not | |
1309 | * updated on expiration in any case, and, in normal | |
1310 | * operation, is reset only when bfqq is selected for | |
1311 | * service (see bfq_get_next_queue). | |
1312 | */ | |
1313 | entity->budget = min_t(unsigned long, | |
1314 | bfq_bfqq_budget_left(bfqq), | |
1315 | bfqq->max_budget); | |
1316 | ||
1317 | return true; | |
1318 | } | |
1319 | ||
1320 | entity->budget = max_t(unsigned long, bfqq->max_budget, | |
1321 | bfq_serv_to_charge(bfqq->next_rq, bfqq)); | |
1322 | bfq_clear_bfqq_non_blocking_wait_rq(bfqq); | |
44e44a1b PV |
1323 | return wr_or_deserves_wr; |
1324 | } | |
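/*
 * Worked example for the first branch above (hypothetical numbers):
 * suppose bfqq was expired while empty with max_budget = 16384
 * sectors, after receiving 4096 sectors of service out of a
 * 16384-sector budget, so that bfq_bfqq_budget_left() returns 12288.
 * If a new request then arrives in time while bfqq is in non-blocking
 * wait, the assignment above sets entity->budget =
 * min(12288, 16384) = 12288, i.e., exactly the budget left unused at
 * the early expiration, consistently with the back-shifted
 * timestamps.
 */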
1325 | ||
4baa8bb1 PV |
1326 | /* |
1327 | * Return the farthest future time instant according to jiffies | |
1328 | * macros. | |
1329 | */ | |
1330 | static unsigned long bfq_greatest_from_now(void) | |
1331 | { | |
1332 | return jiffies + MAX_JIFFY_OFFSET; | |
1333 | } | |
1334 | ||
1335 | /* | |
1336 | * Return the farthest past time instant according to jiffies | |
1337 | * macros. | |
1338 | */ | |
1339 | static unsigned long bfq_smallest_from_now(void) | |
1340 | { | |
1341 | return jiffies - MAX_JIFFY_OFFSET; | |
1342 | } | |
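/*
 * Usage sketch (values are made up): with jiffies = 5000,
 * bfq_smallest_from_now() returns 5000 - MAX_JIFFY_OFFSET, which any
 * time_is_before_jiffies()-style comparison treats as the far past.
 * This is how wr_start_at_switch_to_srt is set to "minus infinity"
 * in bfq_update_bfqq_wr_on_rq_arrival() below.
 */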
1343 | ||
44e44a1b PV |
1344 | static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, |
1345 | struct bfq_queue *bfqq, | |
1346 | unsigned int old_wr_coeff, | |
1347 | bool wr_or_deserves_wr, | |
77b7dcea | 1348 | bool interactive, |
e1b2324d | 1349 | bool in_burst, |
77b7dcea | 1350 | bool soft_rt) |
44e44a1b PV |
1351 | { |
1352 | if (old_wr_coeff == 1 && wr_or_deserves_wr) { | |
1353 | /* start a weight-raising period */ | |
77b7dcea PV |
1354 | if (interactive) { |
1355 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; | |
1356 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); | |
1357 | } else { | |
4baa8bb1 PV |
1358 | /* |
1359 | * No interactive weight raising in progress | |
1360 | * here: assign minus infinity to | |
1361 | * wr_start_at_switch_to_srt, to make sure | |
1362 | * that, at the end of the soft-real-time | |
1363 | * weight-raising period that is starting | |
1364 | * now, no interactive weight-raising period | |
1365 | * may be wrongly considered as still in | |
1366 | * progress (and thus actually started by | |
1367 | * mistake). | |
1368 | */ | |
1369 | bfqq->wr_start_at_switch_to_srt = | |
1370 | bfq_smallest_from_now(); | |
77b7dcea PV |
1371 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * |
1372 | BFQ_SOFTRT_WEIGHT_FACTOR; | |
1373 | bfqq->wr_cur_max_time = | |
1374 | bfqd->bfq_wr_rt_max_time; | |
1375 | } | |
44e44a1b PV |
1376 | |
1377 | /* | |
1378 | * If needed, further reduce budget to make sure it is | |
1379 | * close to bfqq's backlog, so as to reduce the | |
1380 | * scheduling-error component due to a too large | |
1381 | * budget. Do not care about throughput consequences, | |
1382 | * but only about latency. Finally, do not assign a | |
1383 | * too small budget either, to avoid increasing | |
1384 | * latency by causing too frequent expirations. | |
1385 | */ | |
1386 | bfqq->entity.budget = min_t(unsigned long, | |
1387 | bfqq->entity.budget, | |
1388 | 2 * bfq_min_budget(bfqd)); | |
1389 | } else if (old_wr_coeff > 1) { | |
77b7dcea PV |
1390 | if (interactive) { /* update wr coeff and duration */ |
1391 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; | |
1392 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); | |
e1b2324d AA |
1393 | } else if (in_burst) |
1394 | bfqq->wr_coeff = 1; | |
1395 | else if (soft_rt) { | |
77b7dcea PV |
1396 | /* |
1397 | * The application is now or still meeting the | |
1398 | * requirements for being deemed soft rt. We | |
1399 | * can then correctly and safely (re)charge | |
1400 | * the weight-raising duration for the | |
1401 | * application with the weight-raising | |
1402 | * duration for soft rt applications. | |
1403 | * | |
1404 | * In particular, doing this recharge now, i.e., | |
1405 | * before the weight-raising period for the | |
1406 | * application finishes, reduces the probability | |
1407 | * of the following negative scenario: | |
1408 | * 1) the weight of a soft rt application is | |
1409 | * raised at startup (as for any newly | |
1410 | * created application), | |
1411 | * 2) since the application is not interactive, | |
1412 | * at a certain time weight-raising is | |
1413 | * stopped for the application, | |
1414 | * 3) at that time the application happens to | |
1415 | * still have pending requests, and hence | |
1416 | * is destined to not have a chance to be | |
1417 | * deemed soft rt before these requests are | |
1418 | * completed (see the comments to the | |
1419 | * function bfq_bfqq_softrt_next_start() | |
1420 | * for details on soft rt detection), | |
1421 | * 4) these pending requests experience a high | |
1422 | * latency because the application is not | |
1423 | * weight-raised while they are pending. | |
1424 | */ | |
1425 | if (bfqq->wr_cur_max_time != | |
1426 | bfqd->bfq_wr_rt_max_time) { | |
1427 | bfqq->wr_start_at_switch_to_srt = | |
1428 | bfqq->last_wr_start_finish; | |
1429 | ||
1430 | bfqq->wr_cur_max_time = | |
1431 | bfqd->bfq_wr_rt_max_time; | |
1432 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * | |
1433 | BFQ_SOFTRT_WEIGHT_FACTOR; | |
1434 | } | |
1435 | bfqq->last_wr_start_finish = jiffies; | |
1436 | } | |
44e44a1b PV |
1437 | } |
1438 | } | |
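/*
 * Numerical sketch (assuming the default weight-raising tunables,
 * e.g. bfq_wr_coeff = 30 and BFQ_SOFTRT_WEIGHT_FACTOR = 100): an
 * interactive queue entering weight raising sees its weight
 * multiplied by 30 for bfq_wr_duration() jiffies, while a soft
 * real-time queue gets the much larger coefficient 30 * 100 = 3000,
 * but only for the shorter bfq_wr_rt_max_time window.
 */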
1439 | ||
1440 | static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, | |
1441 | struct bfq_queue *bfqq) | |
1442 | { | |
1443 | return bfqq->dispatched == 0 && | |
1444 | time_is_before_jiffies( | |
1445 | bfqq->budget_timeout + | |
1446 | bfqd->bfq_wr_min_idle_time); | |
aee69d78 PV |
1447 | } |
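/*
 * For instance (assuming the default bfq_wr_min_idle_time of about
 * two seconds): a queue with no requests in flight, whose
 * budget_timeout was set more than two seconds ago, counts as idle
 * for a long time, and hence as a candidate for interactive weight
 * raising on the next request arrival.
 */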
1448 | ||
1449 | static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, | |
1450 | struct bfq_queue *bfqq, | |
44e44a1b PV |
1451 | int old_wr_coeff, |
1452 | struct request *rq, | |
1453 | bool *interactive) | |
aee69d78 | 1454 | { |
e1b2324d AA |
1455 | bool soft_rt, in_burst, wr_or_deserves_wr, |
1456 | bfqq_wants_to_preempt, | |
44e44a1b | 1457 | idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), |
aee69d78 PV |
1458 | /* |
1459 | * See the comments on | |
1460 | * bfq_bfqq_update_budg_for_activation for | |
1461 | * details on the usage of the next variable. | |
1462 | */ | |
1463 | arrived_in_time = ktime_get_ns() <= | |
1464 | bfqq->ttime.last_end_request + | |
1465 | bfqd->bfq_slice_idle * 3; | |
1466 | ||
e21b7a0b | 1467 | |
aee69d78 | 1468 | /* |
44e44a1b PV |
1469 | * bfqq deserves to be weight-raised if: |
1470 | * - it is sync, | |
e1b2324d | 1471 | * - it does not belong to a large burst, |
36eca894 AA |
1472 | * - it has been idle for enough time or is soft real-time, |
1473 | * - it is linked to a bfq_io_cq (it is not shared in any sense). | |
44e44a1b | 1474 | */ |
e1b2324d | 1475 | in_burst = bfq_bfqq_in_large_burst(bfqq); |
77b7dcea | 1476 | soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && |
e1b2324d | 1477 | !in_burst && |
77b7dcea | 1478 | time_is_before_jiffies(bfqq->soft_rt_next_start); |
e1b2324d | 1479 | *interactive = !in_burst && idle_for_long_time; |
44e44a1b PV |
1480 | wr_or_deserves_wr = bfqd->low_latency && |
1481 | (bfqq->wr_coeff > 1 || | |
36eca894 AA |
1482 | (bfq_bfqq_sync(bfqq) && |
1483 | bfqq->bic && (*interactive || soft_rt))); | |
44e44a1b PV |
1484 | |
1485 | /* | |
1486 | * Using the last flag, update budget and check whether bfqq | |
1487 | * may want to preempt the in-service queue. | |
aee69d78 PV |
1488 | */ |
1489 | bfqq_wants_to_preempt = | |
1490 | bfq_bfqq_update_budg_for_activation(bfqd, bfqq, | |
44e44a1b PV |
1491 | arrived_in_time, |
1492 | wr_or_deserves_wr); | |
aee69d78 | 1493 | |
e1b2324d AA |
1494 | /* |
1495 | * If bfqq happened to be activated in a burst, but has been | |
1496 | * idle for much longer than an interactive queue would be, then we | |
1497 | * assume that, in the overall I/O initiated in the burst, the | |
1498 | * I/O associated with bfqq is finished. So bfqq does not need | |
1499 | * to be treated as a queue belonging to a burst | |
1500 | * anymore. Accordingly, we reset bfqq's in_large_burst flag | |
1501 | * if set, and remove bfqq from the burst list if it's | |
1502 | * there. We do not decrement burst_size, because the fact | |
1503 | * that bfqq does not need to belong to the burst list any | |
1504 | * more does not invalidate the fact that bfqq was created in | |
1505 | * a burst. | |
1506 | */ | |
1507 | if (likely(!bfq_bfqq_just_created(bfqq)) && | |
1508 | idle_for_long_time && | |
1509 | time_is_before_jiffies( | |
1510 | bfqq->budget_timeout + | |
1511 | msecs_to_jiffies(10000))) { | |
1512 | hlist_del_init(&bfqq->burst_list_node); | |
1513 | bfq_clear_bfqq_in_large_burst(bfqq); | |
1514 | } | |
1515 | ||
1516 | bfq_clear_bfqq_just_created(bfqq); | |
1517 | ||
1518 | ||
aee69d78 PV |
1519 | if (!bfq_bfqq_IO_bound(bfqq)) { |
1520 | if (arrived_in_time) { | |
1521 | bfqq->requests_within_timer++; | |
1522 | if (bfqq->requests_within_timer >= | |
1523 | bfqd->bfq_requests_within_timer) | |
1524 | bfq_mark_bfqq_IO_bound(bfqq); | |
1525 | } else | |
1526 | bfqq->requests_within_timer = 0; | |
1527 | } | |
1528 | ||
44e44a1b | 1529 | if (bfqd->low_latency) { |
36eca894 AA |
1530 | if (unlikely(time_is_after_jiffies(bfqq->split_time))) |
1531 | /* wraparound */ | |
1532 | bfqq->split_time = | |
1533 | jiffies - bfqd->bfq_wr_min_idle_time - 1; | |
1534 | ||
1535 | if (time_is_before_jiffies(bfqq->split_time + | |
1536 | bfqd->bfq_wr_min_idle_time)) { | |
1537 | bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, | |
1538 | old_wr_coeff, | |
1539 | wr_or_deserves_wr, | |
1540 | *interactive, | |
e1b2324d | 1541 | in_burst, |
36eca894 AA |
1542 | soft_rt); |
1543 | ||
1544 | if (old_wr_coeff != bfqq->wr_coeff) | |
1545 | bfqq->entity.prio_changed = 1; | |
1546 | } | |
44e44a1b PV |
1547 | } |
1548 | ||
77b7dcea PV |
1549 | bfqq->last_idle_bklogged = jiffies; |
1550 | bfqq->service_from_backlogged = 0; | |
1551 | bfq_clear_bfqq_softrt_update(bfqq); | |
1552 | ||
aee69d78 PV |
1553 | bfq_add_bfqq_busy(bfqd, bfqq); |
1554 | ||
1555 | /* | |
1556 | * Expire in-service queue only if preemption may be needed | |
1557 | * for guarantees. In this respect, the function | |
1558 | * next_queue_may_preempt just checks a simple, necessary | |
1559 | * condition, and not a sufficient condition based on | |
1560 | * timestamps. In fact, for the latter condition to be | |
1561 | * evaluated, timestamps would need first to be updated, and | |
1562 | * this operation is quite costly (see the comments on the | |
1563 | * function bfq_bfqq_update_budg_for_activation). | |
1564 | */ | |
1565 | if (bfqd->in_service_queue && bfqq_wants_to_preempt && | |
77b7dcea | 1566 | bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && |
aee69d78 PV |
1567 | next_queue_may_preempt(bfqd)) |
1568 | bfq_bfqq_expire(bfqd, bfqd->in_service_queue, | |
1569 | false, BFQQE_PREEMPTED); | |
1570 | } | |
1571 | ||
1572 | static void bfq_add_request(struct request *rq) | |
1573 | { | |
1574 | struct bfq_queue *bfqq = RQ_BFQQ(rq); | |
1575 | struct bfq_data *bfqd = bfqq->bfqd; | |
1576 | struct request *next_rq, *prev; | |
44e44a1b PV |
1577 | unsigned int old_wr_coeff = bfqq->wr_coeff; |
1578 | bool interactive = false; | |
aee69d78 PV |
1579 | |
1580 | bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); | |
1581 | bfqq->queued[rq_is_sync(rq)]++; | |
1582 | bfqd->queued++; | |
1583 | ||
1584 | elv_rb_add(&bfqq->sort_list, rq); | |
1585 | ||
1586 | /* | |
1587 | * Check if this request is a better next-serve candidate. | |
1588 | */ | |
1589 | prev = bfqq->next_rq; | |
1590 | next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); | |
1591 | bfqq->next_rq = next_rq; | |
1592 | ||
36eca894 AA |
1593 | /* |
1594 | * Adjust priority tree position, if next_rq changes. | |
1595 | */ | |
1596 | if (prev != bfqq->next_rq) | |
1597 | bfq_pos_tree_add_move(bfqd, bfqq); | |
1598 | ||
aee69d78 | 1599 | if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */ |
44e44a1b PV |
1600 | bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, |
1601 | rq, &interactive); | |
1602 | else { | |
1603 | if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && | |
1604 | time_is_before_jiffies( | |
1605 | bfqq->last_wr_start_finish + | |
1606 | bfqd->bfq_wr_min_inter_arr_async)) { | |
1607 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; | |
1608 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); | |
1609 | ||
cfd69712 | 1610 | bfqd->wr_busy_queues++; |
44e44a1b PV |
1611 | bfqq->entity.prio_changed = 1; |
1612 | } | |
1613 | if (prev != bfqq->next_rq) | |
1614 | bfq_updated_next_req(bfqd, bfqq); | |
1615 | } | |
1616 | ||
1617 | /* | |
1618 | * Assign jiffies to last_wr_start_finish in the following | |
1619 | * cases: | |
1620 | * | |
1621 | * . if bfqq is not going to be weight-raised, because, for | |
1622 | * non weight-raised queues, last_wr_start_finish stores the | |
1623 | * arrival time of the last request; as of now, this piece | |
1624 | * of information is used only for deciding whether to | |
1625 | * weight-raise async queues | |
1626 | * | |
1627 | * . if bfqq is not weight-raised, because, if bfqq is now | |
1628 | * switching to weight-raised, then last_wr_start_finish | |
1629 | * stores the time when weight-raising starts | |
1630 | * | |
1631 | * . if bfqq is interactive, because, regardless of whether | |
1632 | * bfqq is currently weight-raised, the weight-raising | |
1633 | * period must start or restart (this case is considered | |
1634 | * separately because it is not detected by the above | |
1635 | * conditions, if bfqq is already weight-raised) | |
77b7dcea PV |
1636 | * |
1637 | * last_wr_start_finish has to be updated also if bfqq is soft | |
1638 | * real-time, because the weight-raising period is constantly | |
1639 | * restarted on idle-to-busy transitions for these queues, but | |
1640 | * this is already done in bfq_bfqq_handle_idle_busy_switch if | |
1641 | * needed. | |
44e44a1b PV |
1642 | */ |
1643 | if (bfqd->low_latency && | |
1644 | (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) | |
1645 | bfqq->last_wr_start_finish = jiffies; | |
aee69d78 PV |
1646 | } |
1647 | ||
1648 | static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, | |
1649 | struct bio *bio, | |
1650 | struct request_queue *q) | |
1651 | { | |
1652 | struct bfq_queue *bfqq = bfqd->bio_bfqq; | |
1653 | ||
1654 | ||
1655 | if (bfqq) | |
1656 | return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); | |
1657 | ||
1658 | return NULL; | |
1659 | } | |
1660 | ||
ab0e43e9 PV |
1661 | static sector_t get_sdist(sector_t last_pos, struct request *rq) |
1662 | { | |
1663 | if (last_pos) | |
1664 | return abs(blk_rq_pos(rq) - last_pos); | |
1665 | ||
1666 | return 0; | |
1667 | } | |
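/*
 * For example, with last_pos = 1000 and a request starting at sector
 * 1100, the seek distance is 100 sectors. A zero last_pos (no
 * position cached yet) conservatively yields 0, so the first request
 * after a reset is never counted as a seek.
 */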
1668 | ||
aee69d78 PV |
1669 | #if 0 /* Still not clear if we can do without next two functions */ |
1670 | static void bfq_activate_request(struct request_queue *q, struct request *rq) | |
1671 | { | |
1672 | struct bfq_data *bfqd = q->elevator->elevator_data; | |
1673 | ||
1674 | bfqd->rq_in_driver++; | |
aee69d78 PV |
1675 | } |
1676 | ||
1677 | static void bfq_deactivate_request(struct request_queue *q, struct request *rq) | |
1678 | { | |
1679 | struct bfq_data *bfqd = q->elevator->elevator_data; | |
1680 | ||
1681 | bfqd->rq_in_driver--; | |
1682 | } | |
1683 | #endif | |
1684 | ||
1685 | static void bfq_remove_request(struct request_queue *q, | |
1686 | struct request *rq) | |
1687 | { | |
1688 | struct bfq_queue *bfqq = RQ_BFQQ(rq); | |
1689 | struct bfq_data *bfqd = bfqq->bfqd; | |
1690 | const int sync = rq_is_sync(rq); | |
1691 | ||
1692 | if (bfqq->next_rq == rq) { | |
1693 | bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); | |
1694 | bfq_updated_next_req(bfqd, bfqq); | |
1695 | } | |
1696 | ||
1697 | if (rq->queuelist.prev != &rq->queuelist) | |
1698 | list_del_init(&rq->queuelist); | |
1699 | bfqq->queued[sync]--; | |
1700 | bfqd->queued--; | |
1701 | elv_rb_del(&bfqq->sort_list, rq); | |
1702 | ||
1703 | elv_rqhash_del(q, rq); | |
1704 | if (q->last_merge == rq) | |
1705 | q->last_merge = NULL; | |
1706 | ||
1707 | if (RB_EMPTY_ROOT(&bfqq->sort_list)) { | |
1708 | bfqq->next_rq = NULL; | |
1709 | ||
1710 | if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { | |
e21b7a0b | 1711 | bfq_del_bfqq_busy(bfqd, bfqq, false); |
aee69d78 PV |
1712 | /* |
1713 | * bfqq emptied. In normal operation, when | |
1714 | * bfqq is empty, bfqq->entity.service and | |
1715 | * bfqq->entity.budget must contain, | |
1716 | * respectively, the service received and the | |
1717 | * budget used last time bfqq emptied. These | |
1718 | * facts do not hold in this case, as at least | |
1719 | * this last removal occurred while bfqq is | |
1720 | * not in service. To avoid inconsistencies, | |
1721 | * reset both bfqq->entity.service and | |
1722 | * bfqq->entity.budget, if bfqq still has a | |
1723 | * process that may issue I/O requests to it. | |
1724 | */ | |
1725 | bfqq->entity.budget = bfqq->entity.service = 0; | |
1726 | } | |
36eca894 AA |
1727 | |
1728 | /* | |
1729 | * Remove queue from request-position tree as it is empty. | |
1730 | */ | |
1731 | if (bfqq->pos_root) { | |
1732 | rb_erase(&bfqq->pos_node, bfqq->pos_root); | |
1733 | bfqq->pos_root = NULL; | |
1734 | } | |
05e90283 PV |
1735 | } else { |
1736 | bfq_pos_tree_add_move(bfqd, bfqq); | |
aee69d78 PV |
1737 | } |
1738 | ||
1739 | if (rq->cmd_flags & REQ_META) | |
1740 | bfqq->meta_pending--; | |
e21b7a0b | 1741 | |
aee69d78 PV |
1742 | } |
1743 | ||
1744 | static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) | |
1745 | { | |
1746 | struct request_queue *q = hctx->queue; | |
1747 | struct bfq_data *bfqd = q->elevator->elevator_data; | |
1748 | struct request *free = NULL; | |
1749 | /* | |
1750 | * bfq_bic_lookup grabs the queue_lock: invoke it now and | |
1751 | * store its return value for later use, to avoid nesting | |
1752 | * queue_lock inside the bfqd->lock. We assume that the bic | |
1753 | * returned by bfq_bic_lookup does not go away before | |
1754 | * bfqd->lock is taken. | |
1755 | */ | |
1756 | struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); | |
1757 | bool ret; | |
1758 | ||
1759 | spin_lock_irq(&bfqd->lock); | |
1760 | ||
1761 | if (bic) | |
1762 | bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); | |
1763 | else | |
1764 | bfqd->bio_bfqq = NULL; | |
1765 | bfqd->bio_bic = bic; | |
1766 | ||
1767 | ret = blk_mq_sched_try_merge(q, bio, &free); | |
1768 | ||
1769 | if (free) | |
1770 | blk_mq_free_request(free); | |
1771 | spin_unlock_irq(&bfqd->lock); | |
1772 | ||
1773 | return ret; | |
1774 | } | |
1775 | ||
1776 | static int bfq_request_merge(struct request_queue *q, struct request **req, | |
1777 | struct bio *bio) | |
1778 | { | |
1779 | struct bfq_data *bfqd = q->elevator->elevator_data; | |
1780 | struct request *__rq; | |
1781 | ||
1782 | __rq = bfq_find_rq_fmerge(bfqd, bio, q); | |
1783 | if (__rq && elv_bio_merge_ok(__rq, bio)) { | |
1784 | *req = __rq; | |
1785 | return ELEVATOR_FRONT_MERGE; | |
1786 | } | |
1787 | ||
1788 | return ELEVATOR_NO_MERGE; | |
1789 | } | |
1790 | ||
1791 | static void bfq_request_merged(struct request_queue *q, struct request *req, | |
1792 | enum elv_merge type) | |
1793 | { | |
1794 | if (type == ELEVATOR_FRONT_MERGE && | |
1795 | rb_prev(&req->rb_node) && | |
1796 | blk_rq_pos(req) < | |
1797 | blk_rq_pos(container_of(rb_prev(&req->rb_node), | |
1798 | struct request, rb_node))) { | |
1799 | struct bfq_queue *bfqq = RQ_BFQQ(req); | |
1800 | struct bfq_data *bfqd = bfqq->bfqd; | |
1801 | struct request *prev, *next_rq; | |
1802 | ||
1803 | /* Reposition request in its sort_list */ | |
1804 | elv_rb_del(&bfqq->sort_list, req); | |
1805 | elv_rb_add(&bfqq->sort_list, req); | |
1806 | ||
1807 | /* Choose next request to be served for bfqq */ | |
1808 | prev = bfqq->next_rq; | |
1809 | next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, | |
1810 | bfqd->last_position); | |
1811 | bfqq->next_rq = next_rq; | |
1812 | /* | |
36eca894 AA |
1813 | * If next_rq changes, update both the queue's budget to |
1814 | * fit the new request and the queue's position in its | |
1815 | * rq_pos_tree. | |
aee69d78 | 1816 | */ |
36eca894 | 1817 | if (prev != bfqq->next_rq) { |
aee69d78 | 1818 | bfq_updated_next_req(bfqd, bfqq); |
36eca894 AA |
1819 | bfq_pos_tree_add_move(bfqd, bfqq); |
1820 | } | |
aee69d78 PV |
1821 | } |
1822 | } | |
1823 | ||
1824 | static void bfq_requests_merged(struct request_queue *q, struct request *rq, | |
1825 | struct request *next) | |
1826 | { | |
1827 | struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); | |
1828 | ||
1829 | if (!RB_EMPTY_NODE(&rq->rb_node)) | |
e21b7a0b | 1830 | goto end; |
aee69d78 PV |
1831 | spin_lock_irq(&bfqq->bfqd->lock); |
1832 | ||
1833 | /* | |
1834 | * If next and rq belong to the same bfq_queue and next is older | |
1835 | * than rq, then reposition rq in the fifo (by substituting next | |
1836 | * with rq). Otherwise, if next and rq belong to different | |
1837 | * bfq_queues, never reposition rq: in fact, we would have to | |
1838 | * reposition it with respect to next's position in its own fifo, | |
1839 | * which would most certainly be too expensive with respect to | |
1840 | * the benefits. | |
1841 | */ | |
1842 | if (bfqq == next_bfqq && | |
1843 | !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && | |
1844 | next->fifo_time < rq->fifo_time) { | |
1845 | list_del_init(&rq->queuelist); | |
1846 | list_replace_init(&next->queuelist, &rq->queuelist); | |
1847 | rq->fifo_time = next->fifo_time; | |
1848 | } | |
1849 | ||
1850 | if (bfqq->next_rq == next) | |
1851 | bfqq->next_rq = rq; | |
1852 | ||
1853 | bfq_remove_request(q, next); | |
614822f8 | 1854 | bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags); |
aee69d78 PV |
1855 | |
1856 | spin_unlock_irq(&bfqq->bfqd->lock); | |
e21b7a0b AA |
1857 | end: |
1858 | bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); | |
aee69d78 PV |
1859 | } |
1860 | ||
44e44a1b PV |
1861 | /* Must be called with bfqq != NULL */ |
1862 | static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) | |
1863 | { | |
cfd69712 PV |
1864 | if (bfq_bfqq_busy(bfqq)) |
1865 | bfqq->bfqd->wr_busy_queues--; | |
44e44a1b PV |
1866 | bfqq->wr_coeff = 1; |
1867 | bfqq->wr_cur_max_time = 0; | |
77b7dcea | 1868 | bfqq->last_wr_start_finish = jiffies; |
44e44a1b PV |
1869 | /* |
1870 | * Trigger a weight change on the next invocation of | |
1871 | * __bfq_entity_update_weight_prio. | |
1872 | */ | |
1873 | bfqq->entity.prio_changed = 1; | |
1874 | } | |
1875 | ||
ea25da48 PV |
1876 | void bfq_end_wr_async_queues(struct bfq_data *bfqd, |
1877 | struct bfq_group *bfqg) | |
44e44a1b PV |
1878 | { |
1879 | int i, j; | |
1880 | ||
1881 | for (i = 0; i < 2; i++) | |
1882 | for (j = 0; j < IOPRIO_BE_NR; j++) | |
1883 | if (bfqg->async_bfqq[i][j]) | |
1884 | bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); | |
1885 | if (bfqg->async_idle_bfqq) | |
1886 | bfq_bfqq_end_wr(bfqg->async_idle_bfqq); | |
1887 | } | |
1888 | ||
1889 | static void bfq_end_wr(struct bfq_data *bfqd) | |
1890 | { | |
1891 | struct bfq_queue *bfqq; | |
1892 | ||
1893 | spin_lock_irq(&bfqd->lock); | |
1894 | ||
1895 | list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) | |
1896 | bfq_bfqq_end_wr(bfqq); | |
1897 | list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) | |
1898 | bfq_bfqq_end_wr(bfqq); | |
1899 | bfq_end_wr_async(bfqd); | |
1900 | ||
1901 | spin_unlock_irq(&bfqd->lock); | |
1902 | } | |
1903 | ||
36eca894 AA |
1904 | static sector_t bfq_io_struct_pos(void *io_struct, bool request) |
1905 | { | |
1906 | if (request) | |
1907 | return blk_rq_pos(io_struct); | |
1908 | else | |
1909 | return ((struct bio *)io_struct)->bi_iter.bi_sector; | |
1910 | } | |
1911 | ||
1912 | static int bfq_rq_close_to_sector(void *io_struct, bool request, | |
1913 | sector_t sector) | |
1914 | { | |
1915 | return abs(bfq_io_struct_pos(io_struct, request) - sector) <= | |
1916 | BFQQ_CLOSE_THR; | |
1917 | } | |
1918 | ||
1919 | static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, | |
1920 | struct bfq_queue *bfqq, | |
1921 | sector_t sector) | |
1922 | { | |
1923 | struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; | |
1924 | struct rb_node *parent, *node; | |
1925 | struct bfq_queue *__bfqq; | |
1926 | ||
1927 | if (RB_EMPTY_ROOT(root)) | |
1928 | return NULL; | |
1929 | ||
1930 | /* | |
1931 | * First, if we find a request starting at the end of the last | |
1932 | * request, choose it. | |
1933 | */ | |
1934 | __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); | |
1935 | if (__bfqq) | |
1936 | return __bfqq; | |
1937 | ||
1938 | /* | |
1939 | * If the exact sector wasn't found, the parent of the NULL leaf | |
1940 | * will contain the closest sector (rq_pos_tree sorted by | |
1941 | * next_request position). | |
1942 | */ | |
1943 | __bfqq = rb_entry(parent, struct bfq_queue, pos_node); | |
1944 | if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) | |
1945 | return __bfqq; | |
1946 | ||
1947 | if (blk_rq_pos(__bfqq->next_rq) < sector) | |
1948 | node = rb_next(&__bfqq->pos_node); | |
1949 | else | |
1950 | node = rb_prev(&__bfqq->pos_node); | |
1951 | if (!node) | |
1952 | return NULL; | |
1953 | ||
1954 | __bfqq = rb_entry(node, struct bfq_queue, pos_node); | |
1955 | if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) | |
1956 | return __bfqq; | |
1957 | ||
1958 | return NULL; | |
1959 | } | |
1960 | ||
1961 | static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, | |
1962 | struct bfq_queue *cur_bfqq, | |
1963 | sector_t sector) | |
1964 | { | |
1965 | struct bfq_queue *bfqq; | |
1966 | ||
1967 | /* | |
1968 | * We shall notice if some of the queues are cooperating, | |
1969 | * e.g., working closely on the same area of the device. In | |
1970 | * that case, we can group them together and: 1) don't waste | |
1971 | * time idling, and 2) serve the union of their requests in | |
1972 | * the best possible order for throughput. | |
1973 | */ | |
1974 | bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); | |
1975 | if (!bfqq || bfqq == cur_bfqq) | |
1976 | return NULL; | |
1977 | ||
1978 | return bfqq; | |
1979 | } | |
1980 | ||
1981 | static struct bfq_queue * | |
1982 | bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) | |
1983 | { | |
1984 | int process_refs, new_process_refs; | |
1985 | struct bfq_queue *__bfqq; | |
1986 | ||
1987 | /* | |
1988 | * If there are no process references on the new_bfqq, then it is | |
1989 | * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain | |
1990 | * may have dropped their last reference (not just their last process | |
1991 | * reference). | |
1992 | */ | |
1993 | if (!bfqq_process_refs(new_bfqq)) | |
1994 | return NULL; | |
1995 | ||
1996 | /* Avoid a circular list and skip interim queue merges. */ | |
1997 | while ((__bfqq = new_bfqq->new_bfqq)) { | |
1998 | if (__bfqq == bfqq) | |
1999 | return NULL; | |
2000 | new_bfqq = __bfqq; | |
2001 | } | |
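	/*
	 * Illustrative case (hypothetical queues): if A->new_bfqq == B
	 * and B->new_bfqq == C, scheduling a merge on A walks the
	 * chain above and, at the end of this function, makes A point
	 * directly at C, so A's requests are redirected only once, to
	 * the final queue of the chain.
	 */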
2002 | ||
2003 | process_refs = bfqq_process_refs(bfqq); | |
2004 | new_process_refs = bfqq_process_refs(new_bfqq); | |
2005 | /* | |
2006 | * If the process for the bfqq has gone away, there is no | |
2007 | * sense in merging the queues. | |
2008 | */ | |
2009 | if (process_refs == 0 || new_process_refs == 0) | |
2010 | return NULL; | |
2011 | ||
2012 | bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", | |
2013 | new_bfqq->pid); | |
2014 | ||
2015 | /* | |
2016 | * Merging is just a redirection: the requests of the process | |
2017 | * owning one of the two queues are redirected to the other queue. | |
2018 | * The latter queue, in its turn, is set as shared if this is the | |
2019 | * first time that the requests of some process are redirected to | |
2020 | * it. | |
2021 | * | |
6fa3e8d3 PV |
2022 | * We redirect bfqq to new_bfqq and not the opposite, because |
2023 | * we are in the context of the process owning bfqq, thus we | |
2024 | * have the io_cq of this process. So we can immediately | |
2025 | * configure this io_cq to redirect the requests of the | |
2026 | * process to new_bfqq. In contrast, the io_cq of new_bfqq is | |
2027 | * not available any more (new_bfqq->bic == NULL). | |
36eca894 | 2028 | * |
6fa3e8d3 PV |
2029 | * Anyway, even in case new_bfqq coincides with the in-service |
2030 | * queue, redirecting requests to the in-service queue is the | |
2031 | * best option, as we feed the in-service queue with new | |
2032 | * requests close to the last request served and, by doing so, | |
2033 | * are likely to increase the throughput. | |
36eca894 AA |
2034 | */ |
2035 | bfqq->new_bfqq = new_bfqq; | |
2036 | new_bfqq->ref += process_refs; | |
2037 | return new_bfqq; | |
2038 | } | |
2039 | ||
2040 | static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, | |
2041 | struct bfq_queue *new_bfqq) | |
2042 | { | |
7b8fa3b9 PV |
2043 | if (bfq_too_late_for_merging(new_bfqq)) |
2044 | return false; | |
2045 | ||
36eca894 AA |
2046 | if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || |
2047 | (bfqq->ioprio_class != new_bfqq->ioprio_class)) | |
2048 | return false; | |
2049 | ||
2050 | /* | |
2051 | * If either of the queues has already been detected as seeky, | |
2052 | * then merging it with the other queue is unlikely to lead to | |
2053 | * sequential I/O. | |
2054 | */ | |
2055 | if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) | |
2056 | return false; | |
2057 | ||
2058 | /* | |
2059 | * Interleaved I/O is known to be done by (some) applications | |
2060 | * only for reads, so it does not make sense to merge async | |
2061 | * queues. | |
2062 | */ | |
2063 | if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) | |
2064 | return false; | |
2065 | ||
2066 | return true; | |
2067 | } | |
2068 | ||
36eca894 AA |
2069 | /* |
2070 | * Attempt to schedule a merge of bfqq with the currently in-service | |
2071 | * queue or with a close queue among the scheduled queues. Return | |
2072 | * NULL if no merge was scheduled, a pointer to the shared bfq_queue | |
2073 | * structure otherwise. | |
2074 | * | |
2075 | * The OOM queue is not allowed to participate in cooperation: in fact, since | |
2076 | * the requests temporarily redirected to the OOM queue could be redirected | |
2077 | * again to dedicated queues at any time, the state needed to correctly | |
2078 | * handle merging with the OOM queue would be quite complex and expensive | |
2079 | * to maintain. Besides, in such a critical condition as an out of memory, | |
2080 | * the benefits of queue merging may be of little relevance, or even negligible. | |
2081 | * | |
36eca894 AA |
2082 | * WARNING: queue merging may impair fairness among non-weight raised |
2083 | * queues, for at least two reasons: 1) the original weight of a | |
2084 | * merged queue may change during the merged state, 2) even being the | |
2085 | * weight the same, a merged queue may be bloated with many more | |
2086 | * requests than the ones produced by its originally-associated | |
2087 | * process. | |
2088 | */ | |
2089 | static struct bfq_queue * | |
2090 | bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, | |
2091 | void *io_struct, bool request) | |
2092 | { | |
2093 | struct bfq_queue *in_service_bfqq, *new_bfqq; | |
2094 | ||
7b8fa3b9 PV |
2095 | /* |
2096 | * Prevent bfqq from being merged if it has been created too | |
2097 | * long ago. The idea is that true cooperating processes, and | |
2098 | * thus their associated bfq_queues, are supposed to be | |
2099 | * created shortly after each other. This is the case, e.g., | |
2100 | * for KVM/QEMU and dump I/O threads. Based on this | |
2101 | * assumption, the following filtering greatly reduces the | |
2102 | * probability that two non-cooperating processes, which just | |
2103 | * happen to do close I/O for some short time interval, have | |
2104 | * their queues merged by mistake. | |
2105 | */ | |
2106 | if (bfq_too_late_for_merging(bfqq)) | |
2107 | return NULL; | |
2108 | ||
36eca894 AA |
2109 | if (bfqq->new_bfqq) |
2110 | return bfqq->new_bfqq; | |
2111 | ||
4403e4e4 | 2112 | if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) |
36eca894 AA |
2113 | return NULL; |
2114 | ||
2115 | /* If there is only one backlogged queue, don't search. */ | |
2116 | if (bfqd->busy_queues == 1) | |
2117 | return NULL; | |
2118 | ||
2119 | in_service_bfqq = bfqd->in_service_queue; | |
2120 | ||
4403e4e4 AR |
2121 | if (in_service_bfqq && in_service_bfqq != bfqq && |
2122 | likely(in_service_bfqq != &bfqd->oom_bfqq) && | |
2123 | bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && | |
36eca894 AA |
2124 | bfqq->entity.parent == in_service_bfqq->entity.parent && |
2125 | bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { | |
2126 | new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); | |
2127 | if (new_bfqq) | |
2128 | return new_bfqq; | |
2129 | } | |
2130 | /* | |
2131 | * Check whether there is a cooperator among currently scheduled | |
2132 | * queues. The only thing we need is that the bio/request is not | |
2133 | * NULL, as we need it to establish whether a cooperator exists. | |
2134 | */ | |
36eca894 AA |
2135 | new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, |
2136 | bfq_io_struct_pos(io_struct, request)); | |
2137 | ||
4403e4e4 | 2138 | if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && |
36eca894 AA |
2139 | bfq_may_be_close_cooperator(bfqq, new_bfqq)) |
2140 | return bfq_setup_merge(bfqq, new_bfqq); | |
2141 | ||
2142 | return NULL; | |
2143 | } | |
2144 | ||
2145 | static void bfq_bfqq_save_state(struct bfq_queue *bfqq) | |
2146 | { | |
2147 | struct bfq_io_cq *bic = bfqq->bic; | |
2148 | ||
2149 | /* | |
2150 | * If !bfqq->bic, the queue is already shared or its requests | |
2151 | * have already been redirected to a shared queue; both idle window | |
2152 | * and weight raising state have already been saved. Do nothing. | |
2153 | */ | |
2154 | if (!bic) | |
2155 | return; | |
2156 | ||
2157 | bic->saved_ttime = bfqq->ttime; | |
d5be3fef | 2158 | bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); |
36eca894 | 2159 | bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); |
e1b2324d AA |
2160 | bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); |
2161 | bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); | |
894df937 | 2162 | if (unlikely(bfq_bfqq_just_created(bfqq) && |
1be6e8a9 AR |
2163 | !bfq_bfqq_in_large_burst(bfqq) && |
2164 | bfqq->bfqd->low_latency)) { | |
894df937 PV |
2165 | /* |
2166 | * bfqq being merged right after being created: bfqq | |
2167 | * would have deserved interactive weight raising, but | |
2168 | * did not make it to be set in a weight-raised state, | |
2169 | * because of this early merge. Store directly the | |
2170 | * weight-raising state that would have been assigned | |
2171 | * to bfqq, so as to avoid that bfqq unjustly fails | |
2172 | * to enjoy weight raising if split soon. | |
2173 | */ | |
2174 | bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; | |
2175 | bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); | |
2176 | bic->saved_last_wr_start_finish = jiffies; | |
2177 | } else { | |
2178 | bic->saved_wr_coeff = bfqq->wr_coeff; | |
2179 | bic->saved_wr_start_at_switch_to_srt = | |
2180 | bfqq->wr_start_at_switch_to_srt; | |
2181 | bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; | |
2182 | bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; | |
2183 | } | |
36eca894 AA |
2184 | } |
2185 | ||
36eca894 AA |
2186 | static void |
2187 | bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, | |
2188 | struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) | |
2189 | { | |
2190 | bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", | |
2191 | (unsigned long)new_bfqq->pid); | |
2192 | /* Save weight raising and idle window of the merged queues */ | |
2193 | bfq_bfqq_save_state(bfqq); | |
2194 | bfq_bfqq_save_state(new_bfqq); | |
2195 | if (bfq_bfqq_IO_bound(bfqq)) | |
2196 | bfq_mark_bfqq_IO_bound(new_bfqq); | |
2197 | bfq_clear_bfqq_IO_bound(bfqq); | |
2198 | ||
2199 | /* | |
2200 | * If bfqq is weight-raised, then let new_bfqq inherit | |
2201 | * weight-raising. To reduce false positives, neglect the case | |
2202 | * where bfqq has just been created, but has not yet made it | |
2203 | * to be weight-raised (which may happen because EQM may merge | |
2204 | * bfqq even before bfq_add_request is executed for the first | |
e1b2324d AA |
2205 | * time for bfqq). Handling this case would however be very |
2206 | * easy, thanks to the flag just_created. | |
36eca894 AA |
2207 | */ |
2208 | if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { | |
2209 | new_bfqq->wr_coeff = bfqq->wr_coeff; | |
2210 | new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; | |
2211 | new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; | |
2212 | new_bfqq->wr_start_at_switch_to_srt = | |
2213 | bfqq->wr_start_at_switch_to_srt; | |
2214 | if (bfq_bfqq_busy(new_bfqq)) | |
2215 | bfqd->wr_busy_queues++; | |
2216 | new_bfqq->entity.prio_changed = 1; | |
2217 | } | |
2218 | ||
2219 | if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ | |
2220 | bfqq->wr_coeff = 1; | |
2221 | bfqq->entity.prio_changed = 1; | |
2222 | if (bfq_bfqq_busy(bfqq)) | |
2223 | bfqd->wr_busy_queues--; | |
2224 | } | |
2225 | ||
2226 | bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", | |
2227 | bfqd->wr_busy_queues); | |
2228 | ||
36eca894 AA |
2229 | /* |
2230 | * Merge queues (that is, let bic redirect its requests to new_bfqq) | |
2231 | */ | |
2232 | bic_set_bfqq(bic, new_bfqq, 1); | |
2233 | bfq_mark_bfqq_coop(new_bfqq); | |
2234 | /* | |
2235 | * new_bfqq now belongs to at least two bics (it is a shared queue): | |
2236 | * set new_bfqq->bic to NULL. bfqq either: | |
2237 | * - does not belong to any bic any more, and hence bfqq->bic must | |
2238 | * be set to NULL, or | |
2239 | * - is a queue whose owning bics have already been redirected to a | |
2240 | * different queue, hence the queue is destined to not belong to | |
2241 | * any bic soon and bfqq->bic is already NULL (therefore the next | |
2242 | * assignment causes no harm). | |
2243 | */ | |
2244 | new_bfqq->bic = NULL; | |
2245 | bfqq->bic = NULL; | |
2246 | /* release process reference to bfqq */ | |
2247 | bfq_put_queue(bfqq); | |
2248 | } | |
2249 | ||
aee69d78 PV |
2250 | static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, |
2251 | struct bio *bio) | |
2252 | { | |
2253 | struct bfq_data *bfqd = q->elevator->elevator_data; | |
2254 | bool is_sync = op_is_sync(bio->bi_opf); | |
36eca894 | 2255 | struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; |
aee69d78 PV |
2256 | |
2257 | /* | |
2258 | * Disallow merge of a sync bio into an async request. | |
2259 | */ | |
2260 | if (is_sync && !rq_is_sync(rq)) | |
2261 | return false; | |
2262 | ||
2263 | /* | |
2264 | * Lookup the bfqq that this bio will be queued with. Allow | |
2265 | * merge only if rq is queued there. | |
2266 | */ | |
2267 | if (!bfqq) | |
2268 | return false; | |
2269 | ||
36eca894 AA |
2270 | /* |
2271 | * We take advantage of this function to perform an early merge | |
2272 | * of the queues of possible cooperating processes. | |
2273 | */ | |
2274 | new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); | |
2275 | if (new_bfqq) { | |
2276 | /* | |
2277 | * bic still points to bfqq, then it has not yet been | |
2278 | * redirected to some other bfq_queue, and a queue | |
2279 | * merge between bfqq and new_bfqq can be safely | |
2280 | * fulfilled, i.e., bic can be redirected to new_bfqq | |
2281 | * and bfqq can be put. | |
2282 | */ | |
2283 | bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, | |
2284 | new_bfqq); | |
2285 | /* | |
2286 | * If we get here, bio will be queued into new_queue, | |
2287 | * so use new_bfqq to decide whether bio and rq can be | |
2288 | * merged. | |
2289 | */ | |
2290 | bfqq = new_bfqq; | |
2291 | ||
2292 | /* | |
2293 | * Change also bfqd->bio_bfqq, as | |
2294 | * bfqd->bio_bic now points to new_bfqq, and | |
2295 | * this function may be invoked again (and then may | |
2296 | * use again bfqd->bio_bfqq). | |
2297 | */ | |
2298 | bfqd->bio_bfqq = bfqq; | |
2299 | } | |
2300 | ||
aee69d78 PV |
2301 | return bfqq == RQ_BFQQ(rq); |
2302 | } | |
2303 | ||
44e44a1b PV |
2304 | /* |
2305 | * Set the maximum time for the in-service queue to consume its | |
2306 | * budget. This prevents seeky processes from lowering the throughput. | |
2307 | * In practice, a time-slice service scheme is used with seeky | |
2308 | * processes. | |
2309 | */ | |
2310 | static void bfq_set_budget_timeout(struct bfq_data *bfqd, | |
2311 | struct bfq_queue *bfqq) | |
2312 | { | |
77b7dcea PV |
2313 | unsigned int timeout_coeff; |
2314 | ||
2315 | if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) | |
2316 | timeout_coeff = 1; | |
2317 | else | |
2318 | timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; | |
2319 | ||
44e44a1b PV |
2320 | bfqd->last_budget_start = ktime_get(); |
2321 | ||
2322 | bfqq->budget_timeout = jiffies + | |
77b7dcea | 2323 | bfqd->bfq_timeout * timeout_coeff; |
44e44a1b PV |
2324 | } |
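/*
 * Worked example (hypothetical values): for a weight-raised queue
 * not in soft-rt mode, with entity.weight = 300 and
 * entity.orig_weight = 100, timeout_coeff = 3, so the queue may hold
 * the device for up to 3 * bfq_timeout jiffies before its budget
 * times out.
 */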
2325 | ||
aee69d78 PV |
2326 | static void __bfq_set_in_service_queue(struct bfq_data *bfqd, |
2327 | struct bfq_queue *bfqq) | |
2328 | { | |
2329 | if (bfqq) { | |
aee69d78 PV |
2330 | bfq_clear_bfqq_fifo_expire(bfqq); |
2331 | ||
2332 | bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; | |
2333 | ||
77b7dcea PV |
2334 | if (time_is_before_jiffies(bfqq->last_wr_start_finish) && |
2335 | bfqq->wr_coeff > 1 && | |
2336 | bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && | |
2337 | time_is_before_jiffies(bfqq->budget_timeout)) { | |
2338 | /* | |
2339 | * For soft real-time queues, move the start | |
2340 | * of the weight-raising period forward by the | |
2341 | * time the queue has not received any | |
2342 | * service. Otherwise, a relatively long | |
2343 | * service delay is likely to cause the | |
2344 | * weight-raising period of the queue to end, | |
2345 | * because of the short duration of the | |
2346 | * weight-raising period of a soft real-time | |
2347 | * queue. It is worth noting that this move | |
2348 | * is not so dangerous for the other queues, | |
2349 | * because soft real-time queues are not | |
2350 | * greedy. | |
2351 | * | |
2352 | * To not add a further variable, we use the | |
2353 | * overloaded field budget_timeout to | |
2354 | * determine for how long the queue has not | |
2355 | * received service, i.e., how much time has | |
2356 | * elapsed since the queue expired. However, | |
2357 | * this is a little imprecise, because | |
2358 | * budget_timeout is set to jiffies if bfqq | |
2359 | * not only expires, but also remains with no | |
2360 | * request. | |
2361 | */ | |
2362 | if (time_after(bfqq->budget_timeout, | |
2363 | bfqq->last_wr_start_finish)) | |
2364 | bfqq->last_wr_start_finish += | |
2365 | jiffies - bfqq->budget_timeout; | |
2366 | else | |
2367 | bfqq->last_wr_start_finish = jiffies; | |
2368 | } | |
2369 | ||
44e44a1b | 2370 | bfq_set_budget_timeout(bfqd, bfqq); |
aee69d78 PV |
2371 | bfq_log_bfqq(bfqd, bfqq, |
2372 | "set_in_service_queue, cur-budget = %d", | |
2373 | bfqq->entity.budget); | |
2374 | } | |
2375 | ||
2376 | bfqd->in_service_queue = bfqq; | |
2377 | } | |
2378 | ||
2379 | /* | |
2380 | * Get and set a new queue for service. | |
2381 | */ | |
2382 | static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) | |
2383 | { | |
2384 | struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); | |
2385 | ||
2386 | __bfq_set_in_service_queue(bfqd, bfqq); | |
2387 | return bfqq; | |
2388 | } | |
2389 | ||
aee69d78 PV |
2390 | static void bfq_arm_slice_timer(struct bfq_data *bfqd) |
2391 | { | |
2392 | struct bfq_queue *bfqq = bfqd->in_service_queue; | |
aee69d78 PV |
2393 | u32 sl; |
2394 | ||
aee69d78 PV |
2395 | bfq_mark_bfqq_wait_request(bfqq); |
2396 | ||
2397 | /* | |
2398 | * We don't want to idle for seeks, but we do want to allow | |
2399 | * fair distribution of slice time for a process doing back-to-back | |
2400 | * seeks. So allow a little bit of time for it to submit a new rq. | |
2401 | */ | |
2402 | sl = bfqd->bfq_slice_idle; | |
2403 | /* | |
1de0c4cd AA |
2404 | * Unless the queue is being weight-raised or the scenario is |
2405 | * asymmetric, grant only minimum idle time if the queue | |
2406 | * is seeky. A long idling is preserved for a weight-raised | |
2407 | * queue, or, more in general, in an asymmetric scenario, | |
2408 | * because a long idling is needed for guaranteeing to a queue | |
2409 | * its reserved share of the throughput (in particular, it is | |
2410 | * needed if the queue has a higher weight than some other | |
2411 | * queue). | |
aee69d78 | 2412 | */ |
1de0c4cd AA |
2413 | if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && |
2414 | bfq_symmetric_scenario(bfqd)) | |
aee69d78 PV |
2415 | sl = min_t(u64, sl, BFQ_MIN_TT); |
2416 | ||
2417 | bfqd->last_idling_start = ktime_get(); | |
2418 | hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), | |
2419 | HRTIMER_MODE_REL); | |
e21b7a0b | 2420 | bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); |
aee69d78 PV |
2421 | } |
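/*
 * Order-of-magnitude sketch (assuming the default 8 ms
 * bfq_slice_idle and a 2 ms BFQ_MIN_TT): a weight-raised queue idles
 * for the full 8 ms, while a seeky, non-raised queue in a symmetric
 * scenario is granted only the 2 ms minimum before the idle timer
 * fires.
 */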
2422 | ||
ab0e43e9 PV |
2423 | /* |
2424 | * In autotuning mode, max_budget is dynamically recomputed as the | |
2425 | * amount of sectors transferred in timeout at the estimated peak | |
2426 | * rate. This enables BFQ to utilize a full timeslice with a full | |
2427 | * budget, even if the in-service queue is served at peak rate, which | |
2428 | * maximises throughput with sequential workloads. | |
2429 | */ | |
2430 | static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) | |
2431 | { | |
2432 | return (u64)bfqd->peak_rate * USEC_PER_MSEC * | |
2433 | jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; | |
2434 | } | |
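/*
 * Back-of-the-envelope instance (hypothetical device, and assuming
 * BFQ_RATE_SHIFT = 16 with a 125 ms timeout): a ~100 MB/s disk gives
 * peak_rate ~ 0.2 sectors/usec << 16 ~ 13107, so max_budget =
 * 13107 * 1000 * 125 >> 16 ~ 25000 sectors, i.e., roughly the
 * 12.5 MB the device can transfer in one timeout at peak rate.
 */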
2435 | ||
44e44a1b PV |
2436 | /* |
2437 | * Update parameters related to throughput and responsiveness, as a | |
2438 | * function of the estimated peak rate. See comments on | |
2439 | * bfq_calc_max_budget(), and on T_slow and T_fast arrays. | |
2440 | */ | |
2441 | static void update_thr_responsiveness_params(struct bfq_data *bfqd) | |
2442 | { | |
2443 | int dev_type = blk_queue_nonrot(bfqd->queue); | |
2444 | ||
2445 | if (bfqd->bfq_user_max_budget == 0) | |
2446 | bfqd->bfq_max_budget = | |
2447 | bfq_calc_max_budget(bfqd); | |
2448 | ||
2449 | if (bfqd->device_speed == BFQ_BFQD_FAST && | |
2450 | bfqd->peak_rate < device_speed_thresh[dev_type]) { | |
2451 | bfqd->device_speed = BFQ_BFQD_SLOW; | |
2452 | bfqd->RT_prod = R_slow[dev_type] * | |
2453 | T_slow[dev_type]; | |
2454 | } else if (bfqd->device_speed == BFQ_BFQD_SLOW && | |
2455 | bfqd->peak_rate > device_speed_thresh[dev_type]) { | |
2456 | bfqd->device_speed = BFQ_BFQD_FAST; | |
2457 | bfqd->RT_prod = R_fast[dev_type] * | |
2458 | T_fast[dev_type]; | |
2459 | } | |
2460 | ||
2461 | bfq_log(bfqd, | |
2462 | "dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec", | |
2463 | dev_type == 0 ? "ROT" : "NONROT", | |
2464 | bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW", | |
2465 | bfqd->device_speed == BFQ_BFQD_FAST ? | |
2466 | (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT : | |
2467 | (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT, | |
2468 | (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>> | |
2469 | BFQ_RATE_SHIFT); | |
2470 | } | |
2471 | ||
ab0e43e9 PV |
2472 | static void bfq_reset_rate_computation(struct bfq_data *bfqd, |
2473 | struct request *rq) | |
2474 | { | |
2475 | if (rq != NULL) { /* new rq dispatch now, reset accordingly */ | |
2476 | bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); | |
2477 | bfqd->peak_rate_samples = 1; | |
2478 | bfqd->sequential_samples = 0; | |
2479 | bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = | |
2480 | blk_rq_sectors(rq); | |
2481 | } else /* no new rq dispatched, just reset the number of samples */ | |
2482 | bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ | |
2483 | ||
2484 | bfq_log(bfqd, | |
2485 | "reset_rate_computation at end, sample %u/%u tot_sects %llu", | |
2486 | bfqd->peak_rate_samples, bfqd->sequential_samples, | |
2487 | bfqd->tot_sectors_dispatched); | |
2488 | } | |
2489 | ||
2490 | static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) | |
2491 | { | |
2492 | u32 rate, weight, divisor; | |
2493 | ||
2494 | /* | |
2495 | * For the convergence property to hold (see comments on | |
2496 | * bfq_update_peak_rate()) and for the assessment to be | |
2497 | * reliable, a minimum number of samples must be present, and | |
2498 | * a minimum amount of time must have elapsed. If not, do | |
2499 | * not compute a new rate. Just reset parameters, to get ready | |
2500 | * for a new evaluation attempt. | |
2501 | */ | |
2502 | if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || | |
2503 | bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) | |
2504 | goto reset_computation; | |
2505 | ||
2506 | /* | |
2507 | * If a new request completion has occurred after last | |
2508 | * dispatch, then, to approximate the rate at which requests | |
2509 | * have been served by the device, it is more precise to | |
2510 | * extend the observation interval to the last completion. | |
2511 | */ | |
2512 | bfqd->delta_from_first = | |
2513 | max_t(u64, bfqd->delta_from_first, | |
2514 | bfqd->last_completion - bfqd->first_dispatch); | |
2515 | ||
2516 | /* | |
2517 | * Rate computed in sects/usec, and not sects/nsec, for | |
2518 | * precision issues. | |
2519 | */ | |
2520 | rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, | |
2521 | div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); | |
2522 | ||
2523 | /* | |
2524 | * Peak rate not updated if: | |
2525 | * - the percentage of sequential dispatches is below 3/4 of the | |
2526 | * total, and rate is below the current estimated peak rate | |
2527 | * - rate is unreasonably high (> 20M sectors/sec) | |
2528 | */ | |
2529 | if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && | |
2530 | rate <= bfqd->peak_rate) || | |
2531 | rate > 20<<BFQ_RATE_SHIFT) | |
2532 | goto reset_computation; | |
2533 | ||
2534 | /* | |
2535 | * We have to update the peak rate, at last! To this purpose, | |
2536 | * we use a low-pass filter. We compute the smoothing constant | |
2537 | * of the filter as a function of the 'weight' of the new | |
2538 | * measured rate. | |
2539 | * | |
2540 | * As can be seen in the next formulas, we define this weight as a | |
2541 | * quantity proportional to how sequential the workload is, | |
2542 | * and to how long the observation time interval is. | |
2543 | * | |
2544 | * The weight runs from 0 to 8. The maximum value of the | |
2545 | * weight, 8, yields the minimum value for the smoothing | |
2546 | * constant. At this minimum value for the smoothing constant, | |
2547 | * the measured rate contributes for half of the next value of | |
2548 | * the estimated peak rate. | |
2549 | * | |
2550 | * So, the first step is to compute the weight as a function | |
2551 | * of how sequential the workload is. Note that the weight | |
2552 | * cannot reach 9, because bfqd->sequential_samples cannot | |
2553 | * become equal to bfqd->peak_rate_samples, which, in its | |
2554 | * turn, holds true because bfqd->sequential_samples is not | |
2555 | * incremented for the first sample. | |
2556 | */ | |
2557 | weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; | |
2558 | ||
2559 | /* | |
2560 | * Second step: further refine the weight as a function of the | |
2561 | * duration of the observation interval. | |
2562 | */ | |
2563 | weight = min_t(u32, 8, | |
2564 | div_u64(weight * bfqd->delta_from_first, | |
2565 | BFQ_RATE_REF_INTERVAL)); | |
2566 | ||
2567 | /* | |
2568 | * Divisor ranging from 10, for minimum weight, to 2, for | |
2569 | * maximum weight. | |
2570 | */ | |
2571 | divisor = 10 - weight; | |
2572 | ||
2573 | /* | |
2574 | * Finally, update peak rate: | |
2575 | * | |
2576 | * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor | |
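 *
 * For instance (hypothetical sample): with sequential_samples = 80
 * out of peak_rate_samples = 100 and a full-length observation
 * interval, weight = min(8, (9 * 80) / 100) = 7 and divisor = 3, so
 * the new estimate is 2/3 of the old peak_rate plus 1/3 of the rate
 * just measured.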
2577 | */ | |
2578 | bfqd->peak_rate *= divisor-1; | |
2579 | bfqd->peak_rate /= divisor; | |
2580 | rate /= divisor; /* smoothing constant alpha = 1/divisor */ | |
2581 | ||
2582 | bfqd->peak_rate += rate; | |
44e44a1b | 2583 | update_thr_responsiveness_params(bfqd); |
ab0e43e9 PV |
2584 | |
2585 | reset_computation: | |
2586 | bfq_reset_rate_computation(bfqd, rq); | |
2587 | } | |
2588 | ||
2589 | /* | |
2590 | * Update the read/write peak rate (the main quantity used for | |
2591 | * auto-tuning, see update_thr_responsiveness_params()). | |
2592 | * | |
2593 | * It is not trivial to estimate the peak rate (correctly): because of | |
2594 | * the presence of sw and hw queues between the scheduler and the | |
2595 | * device components that finally serve I/O requests, it is hard to | |
2596 | * say exactly when a given dispatched request is served inside the | |
2597 | * device, and for how long. As a consequence, it is hard to know | |
2598 | * precisely at what rate a given set of requests is actually served | |
2599 | * by the device. | |
2600 | * | |
2601 | * On the opposite end, the dispatch time of any request is trivially | |
2602 | * available, and, from this piece of information, the "dispatch rate" | |
2603 | * of requests can be immediately computed. So, the idea in the next | |
2604 | * function is to use what is known, namely request dispatch times | |
2605 | * (plus, when useful, request completion times), to estimate what is | |
2606 | * unknown, namely in-device request service rate. | |
2607 | * | |
2608 | * The main issue is that, because of the above facts, the rate at | |
2609 | * which a certain set of requests is dispatched over a certain time | |
2610 | * interval can vary greatly with respect to the rate at which the | |
2611 | * same requests are then served. But, since the size of any | |
2612 | * intermediate queue is limited, and the service scheme is lossless | |
2613 | * (no request is silently dropped), the following obvious convergence | |
2614 | * property holds: the number of requests dispatched MUST become | |
2615 | * closer and closer to the number of requests completed as the | |
2616 | * observation interval grows. This is the key property used in | |
2617 | * the next function to estimate the peak service rate as a function | |
2618 | * of the observed dispatch rate. The function assumes to be invoked | |
2619 | * on every request dispatch. | |
2620 | */ | |
2621 | static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) | |
2622 | { | |
2623 | u64 now_ns = ktime_get_ns(); | |
2624 | ||
2625 | if (bfqd->peak_rate_samples == 0) { /* first dispatch */ | |
2626 | bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", | |
2627 | bfqd->peak_rate_samples); | |
2628 | bfq_reset_rate_computation(bfqd, rq); | |
2629 | goto update_last_values; /* will add one sample */ | |
2630 | } | |
2631 | ||
2632 | /* | |
2633 | * Device idle for very long: the observation interval lasting | |
2634 | * up to this dispatch cannot be a valid observation interval | |
2635 | * for computing a new peak rate (similarly to the late- | |
2636 | * completion event in bfq_completed_request()). Go to | |
2637 | * update_rate_and_reset to have the following three steps | |
2638 | * taken: | |
2639 | * - close the observation interval at the last (previous) | |
2640 | * request dispatch or completion | |
2641 | * - compute rate, if possible, for that observation interval | |
2642 | * - start a new observation interval with this dispatch | |
2643 | */ | |
2644 | if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && | |
2645 | bfqd->rq_in_driver == 0) | |
2646 | goto update_rate_and_reset; | |
2647 | ||
2648 | /* Update sampling information */ | |
2649 | bfqd->peak_rate_samples++; | |
2650 | ||
2651 | if ((bfqd->rq_in_driver > 0 || | |
2652 | now_ns - bfqd->last_completion < BFQ_MIN_TT) | |
2653 | && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) | |
2654 | bfqd->sequential_samples++; | |
2655 | ||
2656 | bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); | |
2657 | ||
2658 | /* Reset max observed rq size every 32 dispatches */ | |
2659 | if (likely(bfqd->peak_rate_samples % 32)) | |
2660 | bfqd->last_rq_max_size = | |
2661 | max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); | |
2662 | else | |
2663 | bfqd->last_rq_max_size = blk_rq_sectors(rq); | |
2664 | ||
2665 | bfqd->delta_from_first = now_ns - bfqd->first_dispatch; | |
2666 | ||
2667 | /* Target observation interval not yet reached, go on sampling */ | |
2668 | if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) | |
2669 | goto update_last_values; | |
2670 | ||
2671 | update_rate_and_reset: | |
2672 | bfq_update_rate_reset(bfqd, rq); | |
2673 | update_last_values: | |
2674 | bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); | |
2675 | bfqd->last_dispatch = now_ns; | |
2676 | } | |
2677 | ||
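| /* | |
| * For reference, a minimal user-space sketch of the same | |
| * dispatch-rate idea (illustrative only: rate_est and its field | |
| * names are made up, not part of BFQ, and a fixed alpha = 1/4 | |
| * replaces the adaptive divisor computed by | |
| * bfq_update_rate_reset()): | |
| * | |
| * struct rate_est { | |
| *	u64 win_start_ns; // first dispatch of current window | |
| *	u64 win_sectors;  // sectors dispatched in the window | |
| *	u64 peak_rate;    // smoothed estimate, sectors/sec | |
| * }; | |
| * | |
| * static void rate_est_dispatch(struct rate_est *re, | |
| *			       u64 now_ns, u64 sectors) | |
| * { | |
| *	re->win_sectors += sectors; | |
| *	if (now_ns - re->win_start_ns < 300 * NSEC_PER_MSEC) | |
| *		return; // keep sampling, window still open | |
| *	// close the window: rate in sectors/sec, then EWMA | |
| *	re->peak_rate -= re->peak_rate / 4; | |
| *	re->peak_rate += (re->win_sectors * NSEC_PER_SEC / | |
| *			  (now_ns - re->win_start_ns)) / 4; | |
| *	re->win_start_ns = now_ns; | |
| *	re->win_sectors = 0; | |
| * } | |
| */ | |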
aee69d78 PV |
2678 | /* |
2679 | * Remove request from internal lists. | |
2680 | */ | |
2681 | static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) | |
2682 | { | |
2683 | struct bfq_queue *bfqq = RQ_BFQQ(rq); | |
2684 | ||
2685 | /* | |
2686 | * For consistency, the next instruction should have been | |
2687 | * executed after removing the request from the queue and | |
2688 | * dispatching it. We execute instead this instruction before | |
2689 | * bfq_remove_request() (and hence introduce a temporary | |
2690 | * inconsistency), for efficiency. In fact, should this | |
2691 | * dispatch occur for a non-in-service bfqq, this anticipated | |
2692 | * increment prevents two counters related to bfqq->dispatched | |
2693 | * from being, first, uselessly decremented, and then | |
2694 | * incremented again when the (new) value of bfqq->dispatched | |
2695 | * happens to be taken into account. | |
2696 | */ | |
2697 | bfqq->dispatched++; | |
ab0e43e9 | 2698 | bfq_update_peak_rate(q->elevator->elevator_data, rq); |
aee69d78 PV |
2699 | |
2700 | bfq_remove_request(q, rq); | |
2701 | } | |
2702 | ||
2703 | static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) | |
2704 | { | |
36eca894 AA |
2705 | /* |
2706 | * If this bfqq is shared between multiple processes, check | |
2707 | * to make sure that those processes are still issuing I/Os | |
2708 | * within the mean seek distance. If not, it may be time to | |
2709 | * break the queues apart again. | |
2710 | */ | |
2711 | if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) | |
2712 | bfq_mark_bfqq_split_coop(bfqq); | |
2713 | ||
44e44a1b PV |
2714 | if (RB_EMPTY_ROOT(&bfqq->sort_list)) { |
2715 | if (bfqq->dispatched == 0) | |
2716 | /* | |
2717 | * Overloading budget_timeout field to store | |
2718 | * the time at which the queue remains with no | |
2719 | * backlog and no outstanding request; used by | |
2720 | * the weight-raising mechanism. | |
2721 | */ | |
2722 | bfqq->budget_timeout = jiffies; | |
2723 | ||
e21b7a0b | 2724 | bfq_del_bfqq_busy(bfqd, bfqq, true); |
36eca894 | 2725 | } else { |
80294c3b | 2726 | bfq_requeue_bfqq(bfqd, bfqq, true); |
36eca894 AA |
2727 | /* |
2728 | * Resort priority tree of potential close cooperators. | |
2729 | */ | |
2730 | bfq_pos_tree_add_move(bfqd, bfqq); | |
2731 | } | |
e21b7a0b AA |
2732 | |
2733 | /* | |
2734 | * All in-service entities must have been properly deactivated | |
2735 | * or requeued before executing the next function, which | |
2737 | * resets all in-service entities as no more in service. | |
2737 | */ | |
2738 | __bfq_bfqd_reset_in_service(bfqd); | |
aee69d78 PV |
2739 | } |
2740 | ||
2741 | /** | |
2742 | * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. | |
2743 | * @bfqd: device data. | |
2744 | * @bfqq: queue to update. | |
2745 | * @reason: reason for expiration. | |
2746 | * | |
2747 | * Handle the feedback on @bfqq budget at queue expiration. | |
2748 | * See the body for detailed comments. | |
2749 | */ | |
2750 | static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, | |
2751 | struct bfq_queue *bfqq, | |
2752 | enum bfqq_expiration reason) | |
2753 | { | |
2754 | struct request *next_rq; | |
2755 | int budget, min_budget; | |
2756 | ||
aee69d78 PV |
2757 | min_budget = bfq_min_budget(bfqd); |
2758 | ||
44e44a1b PV |
2759 | if (bfqq->wr_coeff == 1) |
2760 | budget = bfqq->max_budget; | |
2761 | else /* | |
2762 | * Use a constant, low budget for weight-raised queues, | |
2763 | * to help achieve a low latency. Keep it slightly higher | |
2764 | * than the minimum possible budget, to cause slightly | |
2765 | * fewer expirations. | |
2766 | */ | |
2767 | budget = 2 * min_budget; | |
2768 | ||
aee69d78 PV |
2769 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", |
2770 | bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); | |
2771 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", | |
2772 | budget, bfq_min_budget(bfqd)); | |
2773 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", | |
2774 | bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); | |
2775 | ||
44e44a1b | 2776 | if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { |
aee69d78 PV |
2777 | switch (reason) { |
2778 | /* | |
2779 | * Caveat: in all the following cases we trade latency | |
2780 | * for throughput. | |
2781 | */ | |
2782 | case BFQQE_TOO_IDLE: | |
54b60456 PV |
2783 | /* |
2784 | * This is the only case where we may reduce | |
2785 | * the budget: if there is no request of the | |
2786 | * process still waiting for completion, then | |
2787 | * we assume (tentatively) that the timer has | |
2788 | * expired because the batch of requests of | |
2789 | * the process could have been served with a | |
2790 | * smaller budget. Hence, betting that | |
2791 | * process will behave in the same way when it | |
2792 | * becomes backlogged again, we reduce its | |
2793 | * next budget. As long as we guess right, | |
2794 | * this budget cut reduces the latency | |
2795 | * experienced by the process. | |
2796 | * | |
2797 | * However, if there are still outstanding | |
2798 | * requests, then the process may have not yet | |
2799 | * issued its next request just because it is | |
2800 | * still waiting for the completion of some of | |
2801 | * the still outstanding ones. So in this | |
2802 | * subcase we do not reduce its budget, on the | |
2803 | * contrary we increase it to possibly boost | |
2804 | * the throughput, as discussed in the | |
2805 | * comments to the BUDGET_TIMEOUT case. | |
2806 | */ | |
2807 | if (bfqq->dispatched > 0) /* still outstanding reqs */ | |
2808 | budget = min(budget * 2, bfqd->bfq_max_budget); | |
2809 | else { | |
2810 | if (budget > 5 * min_budget) | |
2811 | budget -= 4 * min_budget; | |
2812 | else | |
2813 | budget = min_budget; | |
2814 | } | |
aee69d78 PV |
2815 | break; |
2816 | case BFQQE_BUDGET_TIMEOUT: | |
54b60456 PV |
2817 | /* |
2818 | * We double the budget here because it gives | |
2819 | * the chance to boost the throughput if this | |
2820 | * is not a seeky process (and has bumped into | |
2821 | * this timeout because of, e.g., ZBR). | |
2822 | */ | |
2823 | budget = min(budget * 2, bfqd->bfq_max_budget); | |
aee69d78 PV |
2824 | break; |
2825 | case BFQQE_BUDGET_EXHAUSTED: | |
2826 | /* | |
2827 | * The process still has backlog, and did not | |
2828 | * let either the budget timeout or the disk | |
2829 | * idling timeout expire. Hence it is not | |
2830 | * seeky, has a short thinktime and may be | |
2831 | * happy with a higher budget too. So | |
2832 | * definitely increase the budget of this good | |
2833 | * candidate to boost the disk throughput. | |
2834 | */ | |
54b60456 | 2835 | budget = min(budget * 4, bfqd->bfq_max_budget); |
aee69d78 PV |
2836 | break; |
2837 | case BFQQE_NO_MORE_REQUESTS: | |
2838 | /* | |
2839 | * For queues that expire for this reason, it | |
2840 | * is particularly important to keep the | |
2841 | * budget close to the actual service they | |
2842 | * need. Doing so reduces the timestamp | |
2843 | * misalignment problem described in the | |
2844 | * comments in the body of | |
2845 | * __bfq_activate_entity. In fact, suppose | |
2846 | * that a queue systematically expires for | |
2847 | * BFQQE_NO_MORE_REQUESTS and presents a | |
2848 | * new request in time to enjoy timestamp | |
2849 | * back-shifting. The larger the budget of the | |
2850 | * queue is with respect to the service the | |
2851 | * queue actually requests in each service | |
2852 | * slot, the more times the queue can be | |
2853 | * reactivated with the same virtual finish | |
2854 | * time. It follows that, even if this finish | |
2855 | * time is pushed to the system virtual time | |
2856 | * to reduce the consequent timestamp | |
2857 | * misalignment, the queue unjustly enjoys for | |
2858 | * many re-activations a lower finish time | |
2859 | * than all newly activated queues. | |
2860 | * | |
2861 | * The service needed by bfqq is measured | |
2862 | * quite precisely by bfqq->entity.service. | |
2863 | * Since bfqq does not enjoy device idling, | |
2864 | * bfqq->entity.service is equal to the number | |
2865 | * of sectors that the process associated with | |
2866 | * bfqq requested to read/write before waiting | |
2867 | * for request completions, or blocking for | |
2868 | * other reasons. | |
2869 | */ | |
2870 | budget = max_t(int, bfqq->entity.service, min_budget); | |
2871 | break; | |
2872 | default: | |
2873 | return; | |
2874 | } | |
44e44a1b | 2875 | } else if (!bfq_bfqq_sync(bfqq)) { |
aee69d78 PV |
2876 | /* |
2877 | * Async queues get always the maximum possible | |
2878 | * budget, as for them we do not care about latency | |
2879 | * (in addition, their ability to dispatch is limited | |
2880 | * by the charging factor). | |
2881 | */ | |
2882 | budget = bfqd->bfq_max_budget; | |
2883 | } | |
2884 | ||
2885 | bfqq->max_budget = budget; | |
2886 | ||
2887 | if (bfqd->budgets_assigned >= bfq_stats_min_budgets && | |
2888 | !bfqd->bfq_user_max_budget) | |
2889 | bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); | |
2890 | ||
2891 | /* | |
2892 | * If there is still backlog, then assign a new budget, making | |
2893 | * sure that it is large enough for the next request. Since | |
2894 | * the finish time of bfqq must be kept in sync with the | |
2895 | * budget, be sure to call __bfq_bfqq_expire() *after* this | |
2896 | * update. | |
2897 | * | |
2898 | * If there is no backlog, then no need to update the budget; | |
2899 | * it will be updated on the arrival of a new request. | |
2900 | */ | |
2901 | next_rq = bfqq->next_rq; | |
2902 | if (next_rq) | |
2903 | bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, | |
2904 | bfq_serv_to_charge(next_rq, bfqq)); | |
2905 | ||
2906 | bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", | |
2907 | next_rq ? blk_rq_sectors(next_rq) : 0, | |
2908 | bfqq->entity.budget); | |
2909 | } | |
2910 | ||
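| /* | |
| * Illustrative numeric example for the feedback above (values | |
| * assumed): let min_budget = 32 sectors and bfq_max_budget = | |
| * 16384 sectors. A sync, non-weight-raised queue with budget | |
| * 256 that expires for BFQQE_TOO_IDLE with no outstanding | |
| * requests gets 256 - 4 * 32 = 128 (256 > 5 * 32, so the | |
| * subtractive cut applies); with outstanding requests, or for | |
| * BFQQE_BUDGET_TIMEOUT, it would get min(512, 16384) = 512; for | |
| * BFQQE_BUDGET_EXHAUSTED, min(1024, 16384) = 1024. | |
| */ | |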
aee69d78 | 2911 | /* |
ab0e43e9 PV |
2912 | * Return true if the process associated with bfqq is "slow". The slow |
2913 | * flag is used, in addition to the budget timeout, to reduce the | |
2914 | * amount of service provided to seeky processes, and thus reduce | |
2915 | * their chances to lower the throughput. More details in the comments | |
2916 | * on the function bfq_bfqq_expire(). | |
2917 | * | |
2918 | * An important observation is in order: as discussed in the comments | |
2919 | * on the function bfq_update_peak_rate(), with devices with internal | |
2920 | * queues, it is hard if ever possible to know when and for how long | |
2921 | * an I/O request is processed by the device (apart from the trivial | |
2922 | * I/O pattern where a new request is dispatched only after the | |
2923 | * previous one has been completed). This makes it hard to evaluate | |
2924 | * the real rate at which the I/O requests of each bfq_queue are | |
2925 | * served. In fact, for an I/O scheduler like BFQ, serving a | |
2926 | * bfq_queue means just dispatching its requests during its service | |
2927 | * slot (i.e., until the budget of the queue is exhausted, or the | |
2928 | * queue remains idle, or, finally, a timeout fires). But, during the | |
2929 | * service slot of a bfq_queue, around 100 ms at most, the device may | |
2930 | * be even still processing requests of bfq_queues served in previous | |
2931 | * service slots. On the opposite end, the requests of the in-service | |
2932 | * bfq_queue may be completed after the service slot of the queue | |
2933 | * finishes. | |
2934 | * | |
2935 | * Anyway, unless more sophisticated solutions are used | |
2936 | * (where possible), the sum of the sizes of the requests dispatched | |
2937 | * during the service slot of a bfq_queue is probably the only | |
2938 | * approximation available for the service received by the bfq_queue | |
2939 | * during its service slot. And this sum is the quantity used in this | |
2940 | * function to evaluate the I/O speed of a process. | |
aee69d78 | 2941 | */ |
ab0e43e9 PV |
2942 | static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
2943 | bool compensate, enum bfqq_expiration reason, | |
2944 | unsigned long *delta_ms) | |
aee69d78 | 2945 | { |
ab0e43e9 PV |
2946 | ktime_t delta_ktime; |
2947 | u32 delta_usecs; | |
2948 | bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ | |
aee69d78 | 2949 | |
ab0e43e9 | 2950 | if (!bfq_bfqq_sync(bfqq)) |
aee69d78 PV |
2951 | return false; |
2952 | ||
2953 | if (compensate) | |
ab0e43e9 | 2954 | delta_ktime = bfqd->last_idling_start; |
aee69d78 | 2955 | else |
ab0e43e9 PV |
2956 | delta_ktime = ktime_get(); |
2957 | delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); | |
2958 | delta_usecs = ktime_to_us(delta_ktime); | |
aee69d78 PV |
2959 | |
2960 | /* don't use too short time intervals */ | |
ab0e43e9 PV |
2961 | if (delta_usecs < 1000) { |
2962 | if (blk_queue_nonrot(bfqd->queue)) | |
2963 | /* | |
2964 | * give same worst-case guarantees as idling | |
2965 | * for seeky | |
2966 | */ | |
2967 | *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; | |
2968 | else /* charge at least one seek */ | |
2969 | *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; | |
2970 | ||
2971 | return slow; | |
2972 | } | |
aee69d78 | 2973 | |
ab0e43e9 | 2974 | *delta_ms = delta_usecs / USEC_PER_MSEC; |
aee69d78 PV |
2975 | |
2976 | /* | |
ab0e43e9 PV |
2977 | * Use only long (> 20ms) intervals to filter out excessive |
2978 | * spikes in service rate estimation. | |
aee69d78 | 2979 | */ |
ab0e43e9 PV |
2980 | if (delta_usecs > 20000) { |
2981 | /* | |
2982 | * Caveat for rotational devices: processes doing I/O | |
2983 | * in the slower disk zones tend to be slow(er) even | |
2984 | * if not seeky. In this respect, the estimated peak | |
2985 | * rate is likely to be an average over the disk | |
2986 | * surface. Accordingly, to not be too harsh with | |
2987 | * unlucky processes, a process is deemed slow only if | |
2988 | * its rate has been lower than half of the estimated | |
2989 | * peak rate. | |
2990 | */ | |
2991 | slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; | |
aee69d78 PV |
2992 | } |
2993 | ||
ab0e43e9 | 2994 | bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); |
aee69d78 | 2995 | |
ab0e43e9 | 2996 | return slow; |
aee69d78 PV |
2997 | } |
2998 | ||
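| /* | |
| * Illustrative numeric example (values assumed): with | |
| * bfq_max_budget = 16384 sectors, a sync queue measured over a | |
| * 40000-us interval (> 20000, so the rate check applies) that | |
| * received entity.service = 6000 sectors is deemed slow, as | |
| * 6000 < 16384 / 2; one that received 9000 sectors is not. For | |
| * shorter intervals the rate check is skipped and the initial | |
| * value, derived from the queue's seekiness, is kept. | |
| */ | |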
77b7dcea PV |
2999 | /* |
3000 | * To be deemed as soft real-time, an application must meet two | |
3001 | * requirements. First, the application must not require an average | |
3002 | * bandwidth higher than the approximate bandwidth required to play back or | |
3003 | * record a compressed high-definition video. | |
3004 | * The next function is invoked on the completion of the last request of a | |
3005 | * batch, to compute the next-start time instant, soft_rt_next_start, such | |
3006 | * that, if the next request of the application does not arrive before | |
3007 | * soft_rt_next_start, then the above requirement on the bandwidth is met. | |
3008 | * | |
3009 | * The second requirement is that the request pattern of the application is | |
3010 | * isochronous, i.e., that, after issuing a request or a batch of requests, | |
3011 | * the application stops issuing new requests until all its pending requests | |
3012 | * have been completed. After that, the application may issue a new batch, | |
3013 | * and so on. | |
3014 | * For this reason the next function is invoked to compute | |
3015 | * soft_rt_next_start only for applications that meet this requirement, | |
3016 | * whereas soft_rt_next_start is set to infinity for applications that do | |
3017 | * not. | |
3018 | * | |
a34b0244 PV |
3019 | * Unfortunately, even a greedy (i.e., I/O-bound) application may |
3020 | * happen to meet, occasionally or systematically, both the above | |
3021 | * bandwidth and isochrony requirements. This may happen at least in | |
3022 | * the following circumstances. First, if the CPU load is high. The | |
3023 | * application may stop issuing requests while the CPUs are busy | |
3024 | * serving other processes, then restart, then stop again for a while, | |
3025 | * and so on. The other circumstances are related to the storage | |
3026 | * device: the storage device is highly loaded or reaches a low-enough | |
3027 | * throughput with the I/O of the application (e.g., because the I/O | |
3028 | * is random and/or the device is slow). In all these cases, the | |
3029 | * I/O of the application may be simply slowed down enough to meet | |
3030 | * the bandwidth and isochrony requirements. To reduce the probability | |
3031 | * that greedy applications are deemed as soft real-time in these | |
3032 | * corner cases, a further rule is used in the computation of | |
3033 | * soft_rt_next_start: the return value of this function is forced to | |
3034 | * be higher than the maximum between the following two quantities. | |
3035 | * | |
3036 | * (a) Current time plus: (1) the maximum time for which the arrival | |
3037 | * of a request is waited for when a sync queue becomes idle, | |
3038 | * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We | |
3039 | * postpone for a moment the reason for adding a few extra | |
3040 | * jiffies; we get back to it after next item (b). Lower-bounding | |
3041 | * the return value of this function with the current time plus | |
3042 | * bfqd->bfq_slice_idle tends to filter out greedy applications, | |
3043 | * because the latter issue their next request as soon as possible | |
3044 | * after the last one has been completed. In contrast, a soft | |
3045 | * real-time application spends some time processing data, after a | |
3046 | * batch of its requests has been completed. | |
3047 | * | |
3048 | * (b) Current value of bfqq->soft_rt_next_start. As pointed out | |
3049 | * above, greedy applications may happen to meet both the | |
3050 | * bandwidth and isochrony requirements under heavy CPU or | |
3051 | * storage-device load. In more detail, in these scenarios, these | |
3052 | * applications happen, only for limited time periods, to do I/O | |
3053 | * slowly enough to meet all the requirements described so far, | |
3054 | * including the filtering in above item (a). These slow-speed | |
3055 | * time intervals are usually interspersed between other time | |
3056 | * intervals during which these applications do I/O at a very high | |
3057 | * speed. Fortunately, exactly because of the high speed of the | |
3058 | * I/O in the high-speed intervals, the values returned by this | |
3059 | * function happen to be so high, near the end of any such | |
3060 | * high-speed interval, as to be likely to fall *after* the end of | |
3061 | * the low-speed time interval that follows. These high values are | |
3062 | * stored in bfqq->soft_rt_next_start after each invocation of | |
3063 | * this function. As a consequence, if the last value of | |
3064 | * bfqq->soft_rt_next_start is constantly used to lower-bound the | |
3065 | * next value that this function may return, then, from the very | |
3066 | * beginning of a low-speed interval, bfqq->soft_rt_next_start is | |
3067 | * likely to be constantly kept so high that any I/O request | |
3068 | * issued during the low-speed interval is considered as arriving | |
3069 | * too soon for the application to be deemed as soft | |
3070 | * real-time. Then, in the high-speed interval that follows, the | |
3071 | * application will not be deemed as soft real-time, just because | |
3072 | * it will do I/O at a high speed. And so on. | |
3073 | * | |
3074 | * Getting back to the filtering in item (a), in the following two | |
3075 | * cases this filtering might be easily passed by a greedy | |
3076 | * application, if the reference quantity was just | |
3077 | * bfqd->bfq_slice_idle: | |
3078 | * 1) HZ is so low that the duration of a jiffy is comparable to or | |
3079 | * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow | |
3080 | * devices with HZ=100. The time granularity may be so coarse | |
3081 | * that the approximation, in jiffies, of bfqd->bfq_slice_idle | |
3082 | * is rather lower than the exact value. | |
77b7dcea PV |
3083 | * 2) jiffies, instead of increasing at a constant rate, may stop increasing |
3084 | * for a while, then suddenly 'jump' by several units to recover the lost | |
3085 | * increments. This seems to happen, e.g., inside virtual machines. | |
a34b0244 PV |
3086 | * To address this issue, in the filtering in (a) we do not use as a |
3087 | * reference time interval just bfqd->bfq_slice_idle, but | |
3088 | * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the | |
3089 | * minimum number of jiffies for which the filter seems to be quite | |
3090 | * precise also in embedded systems and KVM/QEMU virtual machines. | |
77b7dcea PV |
3091 | */ |
3092 | static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, | |
3093 | struct bfq_queue *bfqq) | |
3094 | { | |
a34b0244 PV |
3095 | return max3(bfqq->soft_rt_next_start, |
3096 | bfqq->last_idle_bklogged + | |
3097 | HZ * bfqq->service_from_backlogged / | |
3098 | bfqd->bfq_wr_max_softrt_rate, | |
3099 | jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); | |
77b7dcea PV |
3100 | } |
3101 | ||
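| /* | |
| * Illustrative numeric example (values assumed, HZ = 250): with | |
| * bfq_wr_max_softrt_rate = 7000 sectors/sec and | |
| * service_from_backlogged = 2048 sectors, the bandwidth term is | |
| * last_idle_bklogged + 250 * 2048 / 7000 = last_idle_bklogged + | |
| * 73 jiffies (~290 ms): a request arriving earlier would imply | |
| * a rate above the soft real-time threshold. max3() then | |
| * lower-bounds the result with both the previous | |
| * soft_rt_next_start and jiffies + slice_idle + 4 jiffies, for | |
| * the reasons detailed in the comments above. | |
| */ | |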
aee69d78 PV |
3102 | /** |
3103 | * bfq_bfqq_expire - expire a queue. | |
3104 | * @bfqd: device owning the queue. | |
3105 | * @bfqq: the queue to expire. | |
3106 | * @compensate: if true, compensate for the time spent idling. | |
3107 | * @reason: the reason causing the expiration. | |
3108 | * | |
c074170e PV |
3109 | * If the process associated with bfqq does slow I/O (e.g., because it |
3110 | * issues random requests), we charge bfqq with the time it has been | |
3111 | * in service instead of the service it has received (see | |
3112 | * bfq_bfqq_charge_time for details on how this goal is achieved). As | |
3113 | * a consequence, bfqq will typically get higher timestamps upon | |
3114 | * reactivation, and hence it will be rescheduled as if it had | |
3115 | * received more service than what it has actually received. In the | |
3116 | * end, bfqq receives less service in proportion to how slowly its | |
3117 | * associated process consumes its budgets (and hence how seriously it | |
3118 | * tends to lower the throughput). In addition, this time-charging | |
3119 | * strategy guarantees time fairness among slow processes. In | |
3120 | * contrast, if the process associated with bfqq is not slow, we | |
3121 | * charge bfqq exactly with the service it has received. | |
aee69d78 | 3122 | * |
c074170e PV |
3123 | * Charging time to the first type of queues and the exact service to |
3124 | * the other has the effect of using the WF2Q+ policy to schedule the | |
3125 | * former on a timeslice basis, without violating service domain | |
3126 | * guarantees among the latter. | |
aee69d78 | 3127 | */ |
ea25da48 PV |
3128 | void bfq_bfqq_expire(struct bfq_data *bfqd, |
3129 | struct bfq_queue *bfqq, | |
3130 | bool compensate, | |
3131 | enum bfqq_expiration reason) | |
aee69d78 PV |
3132 | { |
3133 | bool slow; | |
ab0e43e9 PV |
3134 | unsigned long delta = 0; |
3135 | struct bfq_entity *entity = &bfqq->entity; | |
aee69d78 PV |
3136 | int ref; |
3137 | ||
3138 | /* | |
ab0e43e9 | 3139 | * Check whether the process is slow (see bfq_bfqq_is_slow). |
aee69d78 | 3140 | */ |
ab0e43e9 | 3141 | slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); |
aee69d78 PV |
3142 | |
3143 | /* | |
c074170e PV |
3144 | * As above explained, charge slow (typically seeky) and |
3145 | * timed-out queues with the time and not the service | |
3146 | * received, to favor sequential workloads. | |
3147 | * | |
3148 | * Processes doing I/O in the slower disk zones will tend to | |
3149 | * be slow(er) even if not seeky. Therefore, since the | |
3150 | * estimated peak rate is actually an average over the disk | |
3151 | * surface, these processes may timeout just for bad luck. To | |
3152 | * avoid punishing them, do not charge time to processes that | |
3153 | * succeeded in consuming at least 2/3 of their budget. This | |
3154 | * allows BFQ to preserve enough elasticity to still perform | |
3155 | * bandwidth, and not time, distribution with processes that | |
3156 | * are just a little unlucky or quasi-sequential. | |
aee69d78 | 3157 | */ |
44e44a1b PV |
3158 | if (bfqq->wr_coeff == 1 && |
3159 | (slow || | |
3160 | (reason == BFQQE_BUDGET_TIMEOUT && | |
3161 | bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) | |
c074170e | 3162 | bfq_bfqq_charge_time(bfqd, bfqq, delta); |
aee69d78 PV |
3163 | |
3164 | if (reason == BFQQE_TOO_IDLE && | |
ab0e43e9 | 3165 | entity->service <= 2 * entity->budget / 10) |
aee69d78 PV |
3166 | bfq_clear_bfqq_IO_bound(bfqq); |
3167 | ||
44e44a1b PV |
3168 | if (bfqd->low_latency && bfqq->wr_coeff == 1) |
3169 | bfqq->last_wr_start_finish = jiffies; | |
3170 | ||
77b7dcea PV |
3171 | if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && |
3172 | RB_EMPTY_ROOT(&bfqq->sort_list)) { | |
3173 | /* | |
3174 | * If we get here, and there are no outstanding | |
3175 | * requests, then the request pattern is isochronous | |
3176 | * (see the comments on the function | |
3177 | * bfq_bfqq_softrt_next_start()). Thus we can compute | |
3178 | * soft_rt_next_start. If, instead, the queue still | |
3179 | * has outstanding requests, then we have to wait for | |
3180 | * the completion of all the outstanding requests to | |
3181 | * discover whether the request pattern is actually | |
3182 | * isochronous. | |
3183 | */ | |
3184 | if (bfqq->dispatched == 0) | |
3185 | bfqq->soft_rt_next_start = | |
3186 | bfq_bfqq_softrt_next_start(bfqd, bfqq); | |
3187 | else { | |
3188 | /* | |
3189 | * The application is still waiting for the | |
3190 | * completion of one or more requests: | |
3191 | * prevent it from possibly being incorrectly | |
3192 | * deemed as soft real-time by setting its | |
3193 | * soft_rt_next_start to infinity. In fact, | |
3194 | * without this assignment, the application | |
3195 | * would be incorrectly deemed as soft | |
3196 | * real-time if: | |
3197 | * 1) it issued a new request before the | |
3198 | * completion of all its in-flight | |
3199 | * requests, and | |
3200 | * 2) at that time, its soft_rt_next_start | |
3201 | * happened to be in the past. | |
3202 | */ | |
3203 | bfqq->soft_rt_next_start = | |
3204 | bfq_greatest_from_now(); | |
3205 | /* | |
3206 | * Schedule an update of soft_rt_next_start to when | |
3207 | * the task may be discovered to be isochronous. | |
3208 | */ | |
3209 | bfq_mark_bfqq_softrt_update(bfqq); | |
3210 | } | |
3211 | } | |
3212 | ||
aee69d78 | 3213 | bfq_log_bfqq(bfqd, bfqq, |
d5be3fef PV |
3214 | "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason, |
3215 | slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); | |
aee69d78 PV |
3216 | |
3217 | /* | |
3218 | * Increase, decrease or leave budget unchanged according to | |
3219 | * reason. | |
3220 | */ | |
3221 | __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); | |
3222 | ref = bfqq->ref; | |
3223 | __bfq_bfqq_expire(bfqd, bfqq); | |
3224 | ||
3225 | /* mark bfqq as waiting a request only if a bic still points to it */ | |
3226 | if (ref > 1 && !bfq_bfqq_busy(bfqq) && | |
3227 | reason != BFQQE_BUDGET_TIMEOUT && | |
3228 | reason != BFQQE_BUDGET_EXHAUSTED) | |
3229 | bfq_mark_bfqq_non_blocking_wait_rq(bfqq); | |
3230 | } | |
3231 | ||
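| /* | |
| * Illustrative example of the time-charging above (values | |
| * assumed): a non-weight-raised, seeky queue deemed slow after | |
| * spending delta = 30 ms in service while receiving only 64 | |
| * sectors is charged, through bfq_bfqq_charge_time(), the | |
| * service equivalent of those 30 ms rather than the 64 sectors. | |
| * Its next finish timestamp is then computed as if it had | |
| * consumed a full time slice, which is what confines such | |
| * queues to a timeslice-like share of the device. | |
| */ | |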
3232 | /* | |
3233 | * Budget timeout is not implemented through a dedicated timer, but | |
3234 | * just checked on request arrivals and completions, as well as on | |
3235 | * idle timer expirations. | |
3236 | */ | |
3237 | static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) | |
3238 | { | |
44e44a1b | 3239 | return time_is_before_eq_jiffies(bfqq->budget_timeout); |
aee69d78 PV |
3240 | } |
3241 | ||
3242 | /* | |
3243 | * If we expire a queue that is actively waiting (i.e., with the | |
3244 | * device idled) for the arrival of a new request, then we may incur | |
3245 | * the timestamp misalignment problem described in the body of the | |
3246 | * function __bfq_activate_entity. Hence we return true only if this | |
3247 | * condition does not hold, or if the queue is slow enough to deserve | |
3248 | * only to be kicked off for preserving a high throughput. | |
3249 | */ | |
3250 | static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) | |
3251 | { | |
3252 | bfq_log_bfqq(bfqq->bfqd, bfqq, | |
3253 | "may_budget_timeout: wait_request %d left %d timeout %d", | |
3254 | bfq_bfqq_wait_request(bfqq), | |
3255 | bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, | |
3256 | bfq_bfqq_budget_timeout(bfqq)); | |
3257 | ||
3258 | return (!bfq_bfqq_wait_request(bfqq) || | |
3259 | bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) | |
3260 | && | |
3261 | bfq_bfqq_budget_timeout(bfqq); | |
3262 | } | |
3263 | ||
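| /* | |
| * Equivalently: once the budget timeout has fired, a queue that | |
| * is actively waiting for a new request is expired only if it | |
| * still holds at least a third of its budget, i.e., only if its | |
| * low consumption marks it as slow enough that expiring it | |
| * helps throughput more than it risks the timestamp | |
| * misalignment mentioned above. | |
| */ | |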
3264 | /* | |
3265 | * For a queue that becomes empty, device idling is allowed only if | |
44e44a1b PV |
3266 | * this function returns true for the queue. As a consequence, since |
3267 | * device idling plays a critical role in both throughput boosting and | |
3268 | * service guarantees, the return value of this function plays a | |
3269 | * critical role in both these aspects as well. | |
3270 | * | |
3271 | * In a nutshell, this function returns true only if idling is | |
3272 | * beneficial for throughput or, even if detrimental for throughput, | |
3273 | * idling is however necessary to preserve service guarantees (low | |
3274 | * latency, desired throughput distribution, ...). In particular, on | |
3275 | * NCQ-capable devices, this function tries to return false, so as to | |
3276 | * help keep the drives' internal queues full, whenever this helps the | |
3277 | * device boost the throughput without causing any service-guarantee | |
3278 | * issue. | |
3279 | * | |
3280 | * In more detail, the return value of this function is obtained by, | |
3281 | * first, computing a number of boolean variables that take into | |
3282 | * account throughput and service-guarantee issues, and, then, | |
3283 | * combining these variables in a logical expression. Most of the | |
3284 | * issues taken into account are not trivial. We discuss these issues | |
3285 | * individually while introducing the variables. | |
aee69d78 PV |
3286 | */ |
3287 | static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) | |
3288 | { | |
3289 | struct bfq_data *bfqd = bfqq->bfqd; | |
edaf9428 PV |
3290 | bool rot_without_queueing = |
3291 | !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, | |
3292 | bfqq_sequential_and_IO_bound, | |
3293 | idling_boosts_thr, idling_boosts_thr_without_issues, | |
e1b2324d | 3294 | idling_needed_for_service_guarantees, |
cfd69712 | 3295 | asymmetric_scenario; |
aee69d78 PV |
3296 | |
3297 | if (bfqd->strict_guarantees) | |
3298 | return true; | |
3299 | ||
d5be3fef PV |
3300 | /* |
3301 | * Idling is performed only if slice_idle > 0. In addition, we | |
3302 | * do not idle if | |
3303 | * (a) bfqq is async | |
3304 | * (b) bfqq is in the idle io prio class: in this case we do | |
3305 | * not idle because we want to minimize the bandwidth that | |
3306 | * queues in this class can steal to higher-priority queues | |
3307 | */ | |
3308 | if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || | |
3309 | bfq_class_idle(bfqq)) | |
3310 | return false; | |
3311 | ||
edaf9428 PV |
3312 | bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && |
3313 | bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); | |
3314 | ||
aee69d78 | 3315 | /* |
44e44a1b PV |
3316 | * The next variable takes into account the cases where idling |
3317 | * boosts the throughput. | |
3318 | * | |
e01eff01 PV |
3319 | * The value of the variable is computed considering, first, that |
3320 | * idling is virtually always beneficial for the throughput if: | |
edaf9428 PV |
3321 | * (a) the device is not NCQ-capable and rotational, or |
3322 | * (b) regardless of the presence of NCQ, the device is rotational and | |
3323 | * the request pattern for bfqq is I/O-bound and sequential, or | |
3324 | * (c) regardless of whether it is rotational, the device is | |
3325 | * not NCQ-capable and the request pattern for bfqq is | |
3326 | * I/O-bound and sequential. | |
bf2b79e7 PV |
3327 | * |
3328 | * Secondly, and in contrast to the above item (b), idling an | |
3329 | * NCQ-capable flash-based device would not boost the | |
e01eff01 | 3330 | * throughput even with sequential I/O; rather it would lower |
bf2b79e7 PV |
3331 | * the throughput in proportion to how fast the device |
3332 | * is. Accordingly, the next variable is true if any of the | |
edaf9428 PV |
3333 | * above conditions (a), (b) or (c) is true, and, in |
3334 | * particular, happens to be false if bfqd is an NCQ-capable | |
3335 | * flash-based device. | |
aee69d78 | 3336 | */ |
edaf9428 PV |
3337 | idling_boosts_thr = rot_without_queueing || |
3338 | ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && | |
3339 | bfqq_sequential_and_IO_bound); | |
aee69d78 | 3340 | |
cfd69712 PV |
3341 | /* |
3342 | * The value of the next variable, | |
3343 | * idling_boosts_thr_without_issues, is equal to that of | |
3344 | * idling_boosts_thr, unless a special case holds. In this | |
3345 | * special case, described below, idling may cause problems to | |
3346 | * weight-raised queues. | |
3347 | * | |
3348 | * When the request pool is saturated (e.g., in the presence | |
3349 | * of write hogs), if the processes associated with | |
3350 | * non-weight-raised queues ask for requests at a lower rate, | |
3351 | * then processes associated with weight-raised queues have a | |
3352 | * higher probability to get a request from the pool | |
3353 | * immediately (or at least soon) when they need one. Thus | |
3354 | * they have a higher probability to actually get a fraction | |
3355 | * of the device throughput proportional to their high | |
3356 | * weight. This is especially true with NCQ-capable drives, | |
3357 | * which enqueue several requests in advance, and further | |
3358 | * reorder internally-queued requests. | |
3359 | * | |
3360 | * For this reason, we force to false the value of | |
3361 | * idling_boosts_thr_without_issues if there are weight-raised | |
3362 | * busy queues. In this case, and if bfqq is not weight-raised, | |
3363 | * this guarantees that the device is not idled for bfqq (if, | |
3364 | * instead, bfqq is weight-raised, then idling will be | |
3365 | * guaranteed by another variable, see below). Combined with | |
3366 | * the timestamping rules of BFQ (see [1] for details), this | |
3367 | * behavior causes bfqq, and hence any sync non-weight-raised | |
3368 | * queue, to get a lower number of requests served, and thus | |
3369 | * to ask for a lower number of requests from the request | |
3370 | * pool, before the busy weight-raised queues get served | |
3371 | * again. This often mitigates starvation problems in the | |
3372 | * presence of heavy write workloads and NCQ, thereby | |
3373 | * guaranteeing a higher application and system responsiveness | |
3374 | * in these hostile scenarios. | |
3375 | */ | |
3376 | idling_boosts_thr_without_issues = idling_boosts_thr && | |
3377 | bfqd->wr_busy_queues == 0; | |
3378 | ||
aee69d78 | 3379 | /* |
bf2b79e7 PV |
3380 | * There is then a case where idling must be performed not |
3381 | * for throughput concerns, but to preserve service | |
3382 | * guarantees. | |
3383 | * | |
3384 | * To introduce this case, we can note that allowing the drive | |
3385 | * to enqueue more than one request at a time, and hence | |
44e44a1b | 3386 | * delegating de facto final scheduling decisions to the |
bf2b79e7 | 3387 | * drive's internal scheduler, entails loss of control on the |
44e44a1b | 3388 | * actual request service order. In particular, the critical |
bf2b79e7 | 3389 | * situation is when requests from different processes happen |
44e44a1b PV |
3390 | * to be present, at the same time, in the internal queue(s) |
3391 | * of the drive. In such a situation, the drive, by deciding | |
3392 | * the service order of the internally-queued requests, does | |
3393 | * determine also the actual throughput distribution among | |
3394 | * these processes. But the drive typically has no notion or | |
3395 | * concern about per-process throughput distribution, and | |
3396 | * makes its decisions only on a per-request basis. Therefore, | |
3397 | * the service distribution enforced by the drive's internal | |
3398 | * scheduler is likely to coincide with the desired | |
3399 | * device-throughput distribution only in a completely | |
bf2b79e7 PV |
3400 | * symmetric scenario where: |
3401 | * (i) each of these processes must get the same throughput as | |
3402 | * the others; | |
3403 | * (ii) all these processes have the same I/O pattern | |
3404 | * (either sequential or random). | |
3405 | * In fact, in such a scenario, the drive will tend to treat | |
3406 | * the requests of each of these processes in about the same | |
3407 | * way as the requests of the others, and thus to provide | |
3408 | * each of these processes with about the same throughput | |
3409 | * (which is exactly the desired throughput distribution). In | |
3410 | * contrast, in any asymmetric scenario, device idling is | |
3411 | * certainly needed to guarantee that bfqq receives its | |
3412 | * assigned fraction of the device throughput (see [1] for | |
3413 | * details). | |
3414 | * | |
3415 | * We address this issue by controlling, actually, only the | |
3416 | * symmetry sub-condition (i), i.e., provided that | |
3417 | * sub-condition (i) holds, idling is not performed, | |
3418 | * regardless of whether sub-condition (ii) holds. In other | |
3419 | * words, only if sub-condition (i) holds, then idling is | |
3420 | * allowed, and the device tends to be prevented from queueing | |
3421 | * many requests, possibly of several processes. The reason | |
3422 | * for not controlling also sub-condition (ii) is that we | |
3423 | * exploit preemption to preserve guarantees in case of | |
3424 | * symmetric scenarios, even if (ii) does not hold, as | |
3425 | * explained in the next two paragraphs. | |
3426 | * | |
3427 | * Even if a queue, say Q, is expired when it remains idle, Q | |
3428 | * can still preempt the new in-service queue if the next | |
3429 | * request of Q arrives soon (see the comments on | |
3430 | * bfq_bfqq_update_budg_for_activation). If all queues and | |
3431 | * groups have the same weight, this form of preemption, | |
3432 | * combined with the hole-recovery heuristic described in the | |
3433 | * comments on function bfq_bfqq_update_budg_for_activation, | |
3434 | * are enough to preserve a correct bandwidth distribution in | |
3435 | * the mid term, even without idling. In fact, even if not | |
3436 | * idling allows the internal queues of the device to contain | |
3437 | * many requests, and thus to reorder requests, we can rather | |
3438 | * safely assume that the internal scheduler still preserves a | |
3439 | * minimum of mid-term fairness. The motivation for using | |
3440 | * preemption instead of idling is that, by not idling, | |
3441 | * service guarantees are preserved without minimally | |
3442 | * sacrificing throughput. In other words, both a high | |
3443 | * throughput and its desired distribution are obtained. | |
3444 | * | |
3445 | * More precisely, this preemption-based, idleless approach | |
3446 | * provides fairness in terms of IOPS, and not sectors per | |
3447 | * second. This can be seen with a simple example. Suppose | |
3448 | * that there are two queues with the same weight, but that | |
3449 | * the first queue receives requests of 8 sectors, while the | |
3450 | * second queue receives requests of 1024 sectors. In | |
3451 | * addition, suppose that each of the two queues contains at | |
3452 | * most one request at a time, which implies that each queue | |
3453 | * always remains idle after it is served. Finally, after | |
3454 | * remaining idle, each queue receives very quickly a new | |
3455 | * request. It follows that the two queues are served | |
3456 | * alternatively, preempting each other if needed. This | |
3457 | * implies that, although both queues have the same weight, | |
3458 | * the queue with large requests receives a service that is | |
3459 | * 1024/8 times as high as the service received by the other | |
3460 | * queue. | |
44e44a1b | 3461 | * |
bf2b79e7 PV |
3462 | * On the other hand, device idling is performed, and thus |
3463 | * pure sector-domain guarantees are provided, for the | |
3464 | * following queues, which are likely to need stronger | |
3465 | * throughput guarantees: weight-raised queues, and queues | |
3466 | * with a higher weight than other queues. When such queues | |
3467 | * are active, sub-condition (i) is false, which triggers | |
3468 | * device idling. | |
44e44a1b | 3469 | * |
bf2b79e7 PV |
3470 | * According to the above considerations, the next variable is |
3471 | * true (only) if sub-condition (i) holds. To compute the | |
3472 | * value of this variable, we not only use the return value of | |
3473 | * the function bfq_symmetric_scenario(), but also check | |
3474 | * whether bfqq is being weight-raised, because | |
3475 | * bfq_symmetric_scenario() does not take into account also | |
3476 | * weight-raised queues (see comments on | |
3477 | * bfq_weights_tree_add()). | |
44e44a1b PV |
3478 | * |
3479 | * As a side note, it is worth considering that the above | |
3480 | * device-idling countermeasures may however fail in the | |
3481 | * following unlucky scenario: if idling is (correctly) | |
bf2b79e7 PV |
3482 | * disabled in a time period during which all symmetry |
3483 | * sub-conditions hold, and hence the device is allowed to | |
44e44a1b PV |
3484 | * enqueue many requests, but at some later point in time some |
3485 | * sub-condition ceases to hold, then it may become impossible | |
3486 | * to let requests be served in the desired order until all | |
3487 | * the requests already queued in the device have been served. | |
3488 | */ | |
bf2b79e7 PV |
3489 | asymmetric_scenario = bfqq->wr_coeff > 1 || |
3490 | !bfq_symmetric_scenario(bfqd); | |
44e44a1b | 3491 | |
e1b2324d AA |
3492 | /* |
3493 | * Finally, there is a case where maximizing throughput is the | |
3494 | * best choice even if it may cause unfairness toward | |
3495 | * bfqq. Such a case is when bfqq became active in a burst of | |
3496 | * queue activations. Queues that became active during a large | |
3497 | * burst benefit only from throughput, as discussed in the | |
3498 | * comments on bfq_handle_burst. Thus, if bfqq became active | |
3499 | * in a burst and not idling the device maximizes throughput, | |
3500 | * then the device must not be idled, because not idling the | |
3501 | * device provides bfqq and all other queues in the burst with | |
3502 | * maximum benefit. Combining this and the above case, we can | |
3503 | * now establish when idling is actually needed to preserve | |
3504 | * service guarantees. | |
3505 | */ | |
3506 | idling_needed_for_service_guarantees = | |
3507 | asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); | |
3508 | ||
44e44a1b | 3509 | /* |
d5be3fef PV |
3510 | * We have now all the components we need to compute the |
3511 | * return value of the function, which is true only if idling | |
3512 | * either boosts the throughput (without issues), or is | |
3513 | * necessary to preserve service guarantees. | |
aee69d78 | 3514 | */ |
d5be3fef PV |
3515 | return idling_boosts_thr_without_issues || |
3516 | idling_needed_for_service_guarantees; | |
aee69d78 PV |
3517 | } |
3518 | ||
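| /* | |
| * In summary, modulo the early exits above for | |
| * strict_guarantees, zero slice_idle and async or idle-class | |
| * queues, the value just returned boils down to (sketch in | |
| * terms of the local variables of the function): | |
| * | |
| *	(idling_boosts_thr && bfqd->wr_busy_queues == 0) || | |
| *	((bfqq->wr_coeff > 1 || !bfq_symmetric_scenario(bfqd)) && | |
| *	 !bfq_bfqq_in_large_burst(bfqq)) | |
| */ | |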
3519 | /* | |
3520 | * If the in-service queue is empty but the function bfq_bfqq_may_idle | |
3521 | * returns true, then: | |
3522 | * 1) the queue must remain in service and cannot be expired, and | |
3523 | * 2) the device must be idled to wait for the possible arrival of a new | |
3524 | * request for the queue. | |
3525 | * See the comments on the function bfq_bfqq_may_idle for the reasons | |
3526 | * why performing device idling is the best choice to boost the throughput | |
3527 | * and preserve service guarantees when bfq_bfqq_may_idle itself | |
3528 | * returns true. | |
3529 | */ | |
3530 | static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) | |
3531 | { | |
d5be3fef | 3532 | return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq); |
aee69d78 PV |
3533 | } |
3534 | ||
3535 | /* | |
3536 | * Select a queue for service. If we have a current queue in service, | |
3537 | * check whether to continue servicing it, or retrieve and set a new one. | |
3538 | */ | |
3539 | static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) | |
3540 | { | |
3541 | struct bfq_queue *bfqq; | |
3542 | struct request *next_rq; | |
3543 | enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT; | |
3544 | ||
3545 | bfqq = bfqd->in_service_queue; | |
3546 | if (!bfqq) | |
3547 | goto new_queue; | |
3548 | ||
3549 | bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); | |
3550 | ||
3551 | if (bfq_may_expire_for_budg_timeout(bfqq) && | |
3552 | !bfq_bfqq_wait_request(bfqq) && | |
3553 | !bfq_bfqq_must_idle(bfqq)) | |
3554 | goto expire; | |
3555 | ||
3556 | check_queue: | |
3557 | /* | |
3558 | * This loop is rarely executed more than once. Even when it | |
3559 | * happens, it is much more convenient to re-execute this loop | |
3560 | * than to return NULL and trigger a new dispatch to get a | |
3561 | * request served. | |
3562 | */ | |
3563 | next_rq = bfqq->next_rq; | |
3564 | /* | |
3565 | * If bfqq has requests queued and it has enough budget left to | |
3566 | * serve them, keep the queue, otherwise expire it. | |
3567 | */ | |
3568 | if (next_rq) { | |
3569 | if (bfq_serv_to_charge(next_rq, bfqq) > | |
3570 | bfq_bfqq_budget_left(bfqq)) { | |
3571 | /* | |
3572 | * Expire the queue for budget exhaustion, | |
3573 | * which makes sure that the next budget is | |
3574 | * enough to serve the next request, even if | |
3575 | * it comes from the fifo expired path. | |
3576 | */ | |
3577 | reason = BFQQE_BUDGET_EXHAUSTED; | |
3578 | goto expire; | |
3579 | } else { | |
3580 | /* | |
3581 | * The idle timer may be pending because we may | |
3582 | * not disable disk idling even when a new request | |
3583 | * arrives. | |
3584 | */ | |
3585 | if (bfq_bfqq_wait_request(bfqq)) { | |
3586 | /* | |
3587 | * If we get here: 1) at least one new request | |
3588 | * has arrived but we have not disabled the | |
3589 | * timer because the request was too small, | |
3590 | * 2) then the block layer has unplugged | |
3591 | * the device, causing the dispatch to be | |
3592 | * invoked. | |
3593 | * | |
3594 | * Since the device is unplugged, now the | |
3595 | * requests are probably large enough to | |
3596 | * provide a reasonable throughput. | |
3597 | * So we disable idling. | |
3598 | */ | |
3599 | bfq_clear_bfqq_wait_request(bfqq); | |
3600 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); | |
3601 | } | |
3602 | goto keep_queue; | |
3603 | } | |
3604 | } | |
3605 | ||
3606 | /* | |
3607 | * No requests pending. However, if the in-service queue is idling | |
3608 | * for a new request, or has requests waiting for a completion and | |
3609 | * may idle after their completion, then keep it anyway. | |
3610 | */ | |
3611 | if (bfq_bfqq_wait_request(bfqq) || | |
3612 | (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { | |
3613 | bfqq = NULL; | |
3614 | goto keep_queue; | |
3615 | } | |
3616 | ||
3617 | reason = BFQQE_NO_MORE_REQUESTS; | |
3618 | expire: | |
3619 | bfq_bfqq_expire(bfqd, bfqq, false, reason); | |
3620 | new_queue: | |
3621 | bfqq = bfq_set_in_service_queue(bfqd); | |
3622 | if (bfqq) { | |
3623 | bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); | |
3624 | goto check_queue; | |
3625 | } | |
3626 | keep_queue: | |
3627 | if (bfqq) | |
3628 | bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); | |
3629 | else | |
3630 | bfq_log(bfqd, "select_queue: no queue returned"); | |
3631 | ||
3632 | return bfqq; | |
3633 | } | |
3634 | ||
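| /* | |
| * Control-flow summary of bfq_select_queue() (sketch): | |
| * | |
| *	if no in-service queue -> pick a new one; | |
| *	if the budget timeout allows expiration and the queue is | |
| *	neither waiting for a request nor bound to idle -> expire; | |
| *	if next_rq exists: expire on budget exhaustion, else keep | |
| *	the queue (cancelling a pending idle timer if needed); | |
| *	if no next_rq but the queue is idling, or may idle after | |
| *	its in-flight requests complete -> return NULL while | |
| *	keeping it in service; else expire for NO_MORE_REQUESTS | |
| *	and pick a new queue. | |
| */ | |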
44e44a1b PV |
3635 | static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
3636 | { | |
3637 | struct bfq_entity *entity = &bfqq->entity; | |
3638 | ||
3639 | if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ | |
3640 | bfq_log_bfqq(bfqd, bfqq, | |
3641 | "raising period dur %u/%u msec, old coeff %u, w %d(%d)", | |
3642 | jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), | |
3643 | jiffies_to_msecs(bfqq->wr_cur_max_time), | |
3644 | bfqq->wr_coeff, | |
3645 | bfqq->entity.weight, bfqq->entity.orig_weight); | |
3646 | ||
3647 | if (entity->prio_changed) | |
3648 | bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); | |
3649 | ||
3650 | /* | |
e1b2324d AA |
3651 | * If the queue was activated in a burst, or too much |
3652 | * time has elapsed from the beginning of this | |
3653 | * weight-raising period, then end weight raising. | |
44e44a1b | 3654 | */ |
e1b2324d AA |
3655 | if (bfq_bfqq_in_large_burst(bfqq)) |
3656 | bfq_bfqq_end_wr(bfqq); | |
3657 | else if (time_is_before_jiffies(bfqq->last_wr_start_finish + | |
3658 | bfqq->wr_cur_max_time)) { | |
77b7dcea PV |
3659 | if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || |
3660 | time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + | |
e1b2324d | 3661 | bfq_wr_duration(bfqd))) |
77b7dcea PV |
3662 | bfq_bfqq_end_wr(bfqq); |
3663 | else { | |
3e2bdd6d | 3664 | switch_back_to_interactive_wr(bfqq, bfqd); |
77b7dcea PV |
3665 | bfqq->entity.prio_changed = 1; |
3666 | } | |
44e44a1b PV |
3667 | } |
3668 | } | |
431b17f9 PV |
3669 | /* |
3670 | * To improve latency (for this or other queues), immediately | |
3671 | * update weight both if it must be raised and if it must be | |
3672 | * lowered. Since, entity may be on some active tree here, and | |
3673 | * might have a pending change of its ioprio class, invoke | |
3674 | * next function with the last parameter unset (see the | |
3675 | * comments on the function). | |
3676 | */ | |
44e44a1b | 3677 | if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) |
431b17f9 PV |
3678 | __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), |
3679 | entity, false); | |
44e44a1b PV |
3680 | } |
3681 | ||
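| /* | |
| * Illustrative timeline for the fallback above (durations | |
| * assumed): a queue that switched to soft real-time weight | |
| * raising at time t, with bfq_wr_rt_max_time = 300 ms, is | |
| * examined here at t + 300 ms. If its earlier interactive | |
| * raising period, started at wr_start_at_switch_to_srt, has not | |
| * elapsed yet (see bfq_wr_duration()), it falls back to the | |
| * interactive coefficient instead of dropping straight back to | |
| * weight 1. | |
| */ | |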
aee69d78 PV |
3682 | /* |
3683 | * Dispatch next request from bfqq. | |
3684 | */ | |
3685 | static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, | |
3686 | struct bfq_queue *bfqq) | |
3687 | { | |
3688 | struct request *rq = bfqq->next_rq; | |
3689 | unsigned long service_to_charge; | |
3690 | ||
3691 | service_to_charge = bfq_serv_to_charge(rq, bfqq); | |
3692 | ||
3693 | bfq_bfqq_served(bfqq, service_to_charge); | |
3694 | ||
3695 | bfq_dispatch_remove(bfqd->queue, rq); | |
3696 | ||
44e44a1b PV |
3697 | /* |
3698 | * If weight raising has to terminate for bfqq, then the next | |
3699 | * function causes an immediate update of bfqq's weight, | |
3700 | * without waiting for the next activation. As a consequence, on | |
3701 | * expiration, bfqq will be timestamped as if it had never been | |
3702 | * weight-raised during this service slot, even if it has | |
3703 | * received part or even most of the service as a | |
3704 | * weight-raised queue. This inflates bfqq's timestamps, which | |
3705 | * is beneficial, as bfqq is then more willing to leave the | |
3706 | * device immediately to possible other weight-raised queues. | |
3707 | */ | |
3708 | bfq_update_wr_data(bfqd, bfqq); | |
3709 | ||
aee69d78 PV |
3710 | /* |
3711 | * Expire bfqq, pretending that its budget expired, if bfqq | |
3712 | * belongs to CLASS_IDLE and other queues are waiting for | |
3713 | * service. | |
3714 | */ | |
3715 | if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) | |
3716 | goto expire; | |
3717 | ||
3718 | return rq; | |
3719 | ||
3720 | expire: | |
3721 | bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); | |
3722 | return rq; | |
3723 | } | |
3724 | ||
3725 | static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) | |
3726 | { | |
3727 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | |
3728 | ||
3729 | /* | |
3730 | * Avoiding lock: a race on bfqd->busy_queues should cause at | |
3731 | * most a call to dispatch for nothing | |
3732 | */ | |
3733 | return !list_empty_careful(&bfqd->dispatch) || | |
3734 | bfqd->busy_queues > 0; | |
3735 | } | |
3736 | ||
3737 | static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) | |
3738 | { | |
3739 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | |
3740 | struct request *rq = NULL; | |
3741 | struct bfq_queue *bfqq = NULL; | |
3742 | ||
3743 | if (!list_empty(&bfqd->dispatch)) { | |
3744 | rq = list_first_entry(&bfqd->dispatch, struct request, | |
3745 | queuelist); | |
3746 | list_del_init(&rq->queuelist); | |
3747 | ||
3748 | bfqq = RQ_BFQQ(rq); | |
3749 | ||
3750 | if (bfqq) { | |
3751 | /* | |
3752 | * Increment counters here, because this | |
3753 | * dispatch does not follow the standard | |
3754 | * dispatch flow (where counters are | |
3755 | * incremented) | |
3756 | */ | |
3757 | bfqq->dispatched++; | |
3758 | ||
3759 | goto inc_in_driver_start_rq; | |
3760 | } | |
3761 | ||
3762 | /* | |
8993d445 CB |
3763 | * We exploit the bfq_finish_request hook to decrement |
3764 | * rq_in_driver, but bfq_finish_request will not be | |
aee69d78 PV |
3765 | * invoked on this request. So, to avoid unbalance, |
3766 | * just start this request, without incrementing | |
3767 | * rq_in_driver. As a negative consequence, | |
3768 | * rq_in_driver is deceptively lower than it should be | |
3769 | * while this request is in service. This may cause | |
3770 | * bfq_schedule_dispatch to be invoked uselessly. | |
3771 | * | |
3772 | * As for implementing an exact solution, the | |
8993d445 CB |
3773 | * bfq_finish_request hook, if defined, is probably |
3774 | * invoked also on this request. So, by exploiting | |
3775 | * this hook, we could 1) increment rq_in_driver here, | |
3776 | * and 2) decrement it in bfq_finish_request. Such a | |
3777 | * solution would let the value of the counter be | |
3778 | * always accurate, but it would entail using an extra | |
3779 | * interface function. This cost seems higher than the | |
3780 | * benefit, given that the frequency of non-elevator-private | |
aee69d78 PV |
3781 | * requests is very low. | |
3782 | */ | |
3783 | goto start_rq; | |
3784 | } | |
3785 | ||
3786 | bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); | |
3787 | ||
3788 | if (bfqd->busy_queues == 0) | |
3789 | goto exit; | |
3790 | ||
3791 | /* | |
3792 | * Force device to serve one request at a time if | |
3793 | * strict_guarantees is true. Forcing this service scheme is | |
3794 | * currently the ONLY way to guarantee that the request | |
3795 | * service order enforced by the scheduler is respected by a | |
3796 | * queueing device. Otherwise the device is free even to make | |
3797 | * some unlucky request wait for as long as the device | |
3798 | * wishes. | |
3799 | * | |
3800 | * Of course, serving one request at a time may cause loss of | |
3801 | * throughput. | |
3802 | */ | |
3803 | if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) | |
3804 | goto exit; | |
3805 | ||
3806 | bfqq = bfq_select_queue(bfqd); | |
3807 | if (!bfqq) | |
3808 | goto exit; | |
3809 | ||
3810 | rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); | |
3811 | ||
3812 | if (rq) { | |
3813 | inc_in_driver_start_rq: | |
3814 | bfqd->rq_in_driver++; | |
3815 | start_rq: | |
3816 | rq->rq_flags |= RQF_STARTED; | |
3817 | } | |
3818 | exit: | |
3819 | return rq; | |
3820 | } | |
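/*
 * Editorial note, not in the original source: __bfq_dispatch_request
 * above has three exit paths, selected by the gotos:
 *  - inc_in_driver_start_rq: requests picked by the scheduler, and
 *    dispatch-list requests that do have a bfqq; rq_in_driver is
 *    incremented before the request is marked as started;
 *  - start_rq: dispatch-list requests without a bfqq, which skip the
 *    rq_in_driver accounting for the reason explained in the comment
 *    inside the function;
 *  - exit: nothing can be dispatched, and NULL is returned.
 */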
3821 | ||
a33801e8 | 3822 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) |
9b25bd03 PV |
3823 | static void bfq_update_dispatch_stats(struct request_queue *q, |
3824 | struct request *rq, | |
3825 | struct bfq_queue *in_serv_queue, | |
3826 | bool idle_timer_disabled) | |
3827 | { | |
3828 | struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL; | |
aee69d78 | 3829 | |
24bfd19b | 3830 | if (!idle_timer_disabled && !bfqq) |
9b25bd03 | 3831 | return; |
24bfd19b PV |
3832 | |
3833 | /* | |
3834 | * rq and bfqq are guaranteed to exist until this function | |
3835 | * ends, for the following reasons. First, rq can be | |
3836 | * dispatched to the device, and then can be completed and | |
3837 | * freed, only after this function ends. Second, rq cannot be | |
3838 | * merged (and thus freed because of a merge) any longer, | |
3839 | * because it has already started. Thus rq cannot be freed | |
3840 | * before this function ends, and, since rq has a reference to | |
3841 | * bfqq, the same guarantee holds for bfqq too. | |
3842 | * | |
3843 | * In addition, the following queue lock guarantees that | |
3844 | * bfqq_group(bfqq) exists as well. | |
3845 | */ | |
9b25bd03 | 3846 | spin_lock_irq(q->queue_lock); |
24bfd19b PV |
3847 | if (idle_timer_disabled) |
3848 | /* | |
3849 | * Since the idle timer has been disabled, | |
3850 | * in_serv_queue contained some request when | |
3851 | * __bfq_dispatch_request was invoked above, which | |
3852 | * implies that rq was picked exactly from | |
3853 | * in_serv_queue. Thus in_serv_queue == bfqq, and is | |
3854 | * therefore guaranteed to exist because of the above | |
3855 | * arguments. | |
3856 | */ | |
3857 | bfqg_stats_update_idle_time(bfqq_group(in_serv_queue)); | |
3858 | if (bfqq) { | |
3859 | struct bfq_group *bfqg = bfqq_group(bfqq); | |
3860 | ||
3861 | bfqg_stats_update_avg_queue_size(bfqg); | |
3862 | bfqg_stats_set_start_empty_time(bfqg); | |
3863 | bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); | |
3864 | } | |
9b25bd03 PV |
3865 | spin_unlock_irq(q->queue_lock); |
3866 | } | |
3867 | #else | |
3868 | static inline void bfq_update_dispatch_stats(struct request_queue *q, | |
3869 | struct request *rq, | |
3870 | struct bfq_queue *in_serv_queue, | |
3871 | bool idle_timer_disabled) {} | |
24bfd19b PV |
3872 | #endif |
3873 | ||
9b25bd03 PV |
3874 | static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) |
3875 | { | |
3876 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | |
3877 | struct request *rq; | |
3878 | struct bfq_queue *in_serv_queue; | |
3879 | bool waiting_rq, idle_timer_disabled; | |
3880 | ||
3881 | spin_lock_irq(&bfqd->lock); | |
3882 | ||
3883 | in_serv_queue = bfqd->in_service_queue; | |
3884 | waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); | |
3885 | ||
3886 | rq = __bfq_dispatch_request(hctx); | |
3887 | ||
3888 | idle_timer_disabled = | |
3889 | waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); | |
3890 | ||
3891 | spin_unlock_irq(&bfqd->lock); | |
3892 | ||
3893 | bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, | |
3894 | idle_timer_disabled); | |
3895 | ||
aee69d78 PV |
3896 | return rq; |
3897 | } | |
3898 | ||
3899 | /* | |
3900 | * Task holds one reference to the queue, dropped when task exits. Each rq | |
3901 | * in-flight on this queue also holds a reference, dropped when rq is freed. | |
3902 | * | |
3903 | * Scheduler lock must be held here. Recall not to use bfqq after calling | |
3904 | * this function on it. | |
3905 | */ | |
ea25da48 | 3906 | void bfq_put_queue(struct bfq_queue *bfqq) |
aee69d78 | 3907 | { |
e21b7a0b AA |
3908 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
3909 | struct bfq_group *bfqg = bfqq_group(bfqq); | |
3910 | #endif | |
3911 | ||
aee69d78 PV |
3912 | if (bfqq->bfqd) |
3913 | bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", | |
3914 | bfqq, bfqq->ref); | |
3915 | ||
3916 | bfqq->ref--; | |
3917 | if (bfqq->ref) | |
3918 | return; | |
3919 | ||
99fead8d | 3920 | if (!hlist_unhashed(&bfqq->burst_list_node)) { |
e1b2324d | 3921 | hlist_del_init(&bfqq->burst_list_node); |
99fead8d PV |
3922 | /* |
3923 | * Decrement also burst size after the removal, if the | |
3924 | * process associated with bfqq is exiting, and thus | |
3925 | * does not contribute to the burst any longer. This | |
3926 | * decrement helps filter out false positives of large | |
3927 | * bursts, when some short-lived process (often due to | |
3928 | * the execution of commands by some service) happens | |
3929 | * to start and exit while a complex application is | |
3930 | * starting, and thus spawning several processes that | |
3931 | * do I/O (and that *must not* be treated as a large | |
3932 | * burst, see comments on bfq_handle_burst). | |
3933 | * | |
3934 | * In particular, the decrement is performed only if: | |
3935 | * 1) bfqq is not a merged queue, because, if it is, | |
3936 | * then this free of bfqq is not triggered by the exit | |
3937 | * of the process bfqq is associated with, but exactly | |
3938 | * by the fact that bfqq has just been merged. | |
3939 | * 2) burst_size is greater than 0, to handle | |
3940 | * unbalanced decrements. Unbalanced decrements may | |
3941 | * happen in the following case: bfqq is inserted into | |
3942 | * the current burst list--without incrementing | |
3943 | * burst_size--because of a split, but the current | |
3944 | * burst list is not the burst list bfqq belonged to | |
3945 | * (see comments on the case of a split in | |
3946 | * bfq_set_request). | |
3947 | */ | |
3948 | if (bfqq->bic && bfqq->bfqd->burst_size > 0) | |
3949 | bfqq->bfqd->burst_size--; | |
7cb04004 | 3950 | } |
e21b7a0b | 3951 | |
aee69d78 | 3952 | kmem_cache_free(bfq_pool, bfqq); |
e21b7a0b | 3953 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
8f9bebc3 | 3954 | bfqg_and_blkg_put(bfqg); |
e21b7a0b | 3955 | #endif |
aee69d78 PV |
3956 | } |
3957 | ||
36eca894 AA |
3958 | static void bfq_put_cooperator(struct bfq_queue *bfqq) |
3959 | { | |
3960 | struct bfq_queue *__bfqq, *next; | |
3961 | ||
3962 | /* | |
3963 | * If this queue was scheduled to merge with another queue, be | |
3964 | * sure to drop the reference taken on that queue (and others in | |
3965 | * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. | |
3966 | */ | |
3967 | __bfqq = bfqq->new_bfqq; | |
3968 | while (__bfqq) { | |
3969 | if (__bfqq == bfqq) | |
3970 | break; | |
3971 | next = __bfqq->new_bfqq; | |
3972 | bfq_put_queue(__bfqq); | |
3973 | __bfqq = next; | |
3974 | } | |
3975 | } | |
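/*
 * Editorial note, not in the original source: the new_bfqq pointers
 * walked above form a chain of queues scheduled to merge; the loop
 * drops the reference taken on each queue in the chain, and the
 * (__bfqq == bfqq) check stops the walk in case the chain happens to
 * contain a cycle.
 */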
3976 | ||
aee69d78 PV |
3977 | static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
3978 | { | |
3979 | if (bfqq == bfqd->in_service_queue) { | |
3980 | __bfq_bfqq_expire(bfqd, bfqq); | |
3981 | bfq_schedule_dispatch(bfqd); | |
3982 | } | |
3983 | ||
3984 | bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); | |
3985 | ||
36eca894 AA |
3986 | bfq_put_cooperator(bfqq); |
3987 | ||
aee69d78 PV |
3988 | bfq_put_queue(bfqq); /* release process reference */ |
3989 | } | |
3990 | ||
3991 | static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) | |
3992 | { | |
3993 | struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); | |
3994 | struct bfq_data *bfqd; | |
3995 | ||
3996 | if (bfqq) | |
3997 | bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ | |
3998 | ||
3999 | if (bfqq && bfqd) { | |
4000 | unsigned long flags; | |
4001 | ||
4002 | spin_lock_irqsave(&bfqd->lock, flags); | |
4003 | bfq_exit_bfqq(bfqd, bfqq); | |
4004 | bic_set_bfqq(bic, NULL, is_sync); | |
6fa3e8d3 | 4005 | spin_unlock_irqrestore(&bfqd->lock, flags); |
aee69d78 PV |
4006 | } |
4007 | } | |
4008 | ||
4009 | static void bfq_exit_icq(struct io_cq *icq) | |
4010 | { | |
4011 | struct bfq_io_cq *bic = icq_to_bic(icq); | |
4012 | ||
4013 | bfq_exit_icq_bfqq(bic, true); | |
4014 | bfq_exit_icq_bfqq(bic, false); | |
4015 | } | |
4016 | ||
4017 | /* | |
4018 | * Update the entity prio values; note that the new values will not | |
4019 | * be used until the next (re)activation. | |
4020 | */ | |
4021 | static void | |
4022 | bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) | |
4023 | { | |
4024 | struct task_struct *tsk = current; | |
4025 | int ioprio_class; | |
4026 | struct bfq_data *bfqd = bfqq->bfqd; | |
4027 | ||
4028 | if (!bfqd) | |
4029 | return; | |
4030 | ||
4031 | ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); | |
4032 | switch (ioprio_class) { | |
4033 | default: | |
4034 | dev_err(bfqq->bfqd->queue->backing_dev_info->dev, | |
4035 | "bfq: bad prio class %d\n", ioprio_class); | |
fa393d1b | 4036 | /* fall through */ |
aee69d78 PV |
4037 | case IOPRIO_CLASS_NONE: |
4038 | /* | |
4039 | * No prio set, inherit CPU scheduling settings. | |
4040 | */ | |
4041 | bfqq->new_ioprio = task_nice_ioprio(tsk); | |
4042 | bfqq->new_ioprio_class = task_nice_ioclass(tsk); | |
4043 | break; | |
4044 | case IOPRIO_CLASS_RT: | |
4045 | bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); | |
4046 | bfqq->new_ioprio_class = IOPRIO_CLASS_RT; | |
4047 | break; | |
4048 | case IOPRIO_CLASS_BE: | |
4049 | bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); | |
4050 | bfqq->new_ioprio_class = IOPRIO_CLASS_BE; | |
4051 | break; | |
4052 | case IOPRIO_CLASS_IDLE: | |
4053 | bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; | |
4054 | bfqq->new_ioprio = 7; | |
aee69d78 PV |
4055 | break; |
4056 | } | |
4057 | ||
4058 | if (bfqq->new_ioprio >= IOPRIO_BE_NR) { | |
4059 | pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", | |
4060 | bfqq->new_ioprio); | |
4061 | bfqq->new_ioprio = IOPRIO_BE_NR; | |
4062 | } | |
4063 | ||
4064 | bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); | |
4065 | bfqq->entity.prio_changed = 1; | |
4066 | } | |
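/*
 * Editorial note, not in the original source: bfq_ioprio_to_weight(),
 * defined in the BFQ headers, is assumed here to map the eight ioprio
 * levels linearly to weights, with numerically lower ioprio values
 * (higher priority) yielding larger weights. As the comment above the
 * function says, the new weight only takes effect at the next
 * (re)activation of the entity.
 */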
4067 | ||
ea25da48 PV |
4068 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, |
4069 | struct bio *bio, bool is_sync, | |
4070 | struct bfq_io_cq *bic); | |
4071 | ||
aee69d78 PV |
4072 | static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) |
4073 | { | |
4074 | struct bfq_data *bfqd = bic_to_bfqd(bic); | |
4075 | struct bfq_queue *bfqq; | |
4076 | int ioprio = bic->icq.ioc->ioprio; | |
4077 | ||
4078 | /* | |
4079 | * This condition may trigger on a newly created bic; be sure to | |
4080 | * drop the lock before returning. | |
4081 | */ | |
4082 | if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) | |
4083 | return; | |
4084 | ||
4085 | bic->ioprio = ioprio; | |
4086 | ||
4087 | bfqq = bic_to_bfqq(bic, false); | |
4088 | if (bfqq) { | |
4089 | /* release process reference on this queue */ | |
4090 | bfq_put_queue(bfqq); | |
4091 | bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); | |
4092 | bic_set_bfqq(bic, bfqq, false); | |
4093 | } | |
4094 | ||
4095 | bfqq = bic_to_bfqq(bic, true); | |
4096 | if (bfqq) | |
4097 | bfq_set_next_ioprio_data(bfqq, bic); | |
4098 | } | |
4099 | ||
4100 | static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, | |
4101 | struct bfq_io_cq *bic, pid_t pid, int is_sync) | |
4102 | { | |
4103 | RB_CLEAR_NODE(&bfqq->entity.rb_node); | |
4104 | INIT_LIST_HEAD(&bfqq->fifo); | |
e1b2324d | 4105 | INIT_HLIST_NODE(&bfqq->burst_list_node); |
aee69d78 PV |
4106 | |
4107 | bfqq->ref = 0; | |
4108 | bfqq->bfqd = bfqd; | |
4109 | ||
4110 | if (bic) | |
4111 | bfq_set_next_ioprio_data(bfqq, bic); | |
4112 | ||
4113 | if (is_sync) { | |
d5be3fef PV |
4114 | /* |
4115 | * No need to mark as has_short_ttime if in | |
4116 | * idle_class, because no device idling is performed | |
4117 | * for queues in idle class | |
4118 | */ | |
aee69d78 | 4119 | if (!bfq_class_idle(bfqq)) |
d5be3fef PV |
4120 | /* tentatively mark as has_short_ttime */ |
4121 | bfq_mark_bfqq_has_short_ttime(bfqq); | |
aee69d78 | 4122 | bfq_mark_bfqq_sync(bfqq); |
e1b2324d | 4123 | bfq_mark_bfqq_just_created(bfqq); |
aee69d78 PV |
4124 | } else |
4125 | bfq_clear_bfqq_sync(bfqq); | |
4126 | ||
4127 | /* set end request to minus infinity from now */ | |
4128 | bfqq->ttime.last_end_request = ktime_get_ns() + 1; | |
4129 | ||
4130 | bfq_mark_bfqq_IO_bound(bfqq); | |
4131 | ||
4132 | bfqq->pid = pid; | |
4133 | ||
4134 | /* Tentative initial value to trade off between thr and lat */ | |
54b60456 | 4135 | bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; |
aee69d78 | 4136 | bfqq->budget_timeout = bfq_smallest_from_now(); |
aee69d78 | 4137 | |
44e44a1b | 4138 | bfqq->wr_coeff = 1; |
36eca894 | 4139 | bfqq->last_wr_start_finish = jiffies; |
77b7dcea | 4140 | bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); |
36eca894 | 4141 | bfqq->split_time = bfq_smallest_from_now(); |
77b7dcea PV |
4142 | |
4143 | /* | |
a34b0244 PV |
4144 | * To not forget the possibly high bandwidth consumed by a |
4145 | * process/queue in the recent past, | |
4146 | * bfq_bfqq_softrt_next_start() returns a value at least equal | |
4147 | * to the current value of bfqq->soft_rt_next_start (see | |
4148 | * comments on bfq_bfqq_softrt_next_start). Set | |
4149 | * soft_rt_next_start to now, to mean that bfqq has consumed | |
4150 | * no bandwidth so far. | |
77b7dcea | 4151 | */ |
a34b0244 | 4152 | bfqq->soft_rt_next_start = jiffies; |
44e44a1b | 4153 | |
aee69d78 PV |
4154 | /* first request is almost certainly seeky */ |
4155 | bfqq->seek_history = 1; | |
4156 | } | |
4157 | ||
4158 | static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, | |
e21b7a0b | 4159 | struct bfq_group *bfqg, |
aee69d78 PV |
4160 | int ioprio_class, int ioprio) |
4161 | { | |
4162 | switch (ioprio_class) { | |
4163 | case IOPRIO_CLASS_RT: | |
e21b7a0b | 4164 | return &bfqg->async_bfqq[0][ioprio]; |
aee69d78 PV |
4165 | case IOPRIO_CLASS_NONE: |
4166 | ioprio = IOPRIO_NORM; | |
4167 | /* fall through */ | |
4168 | case IOPRIO_CLASS_BE: | |
e21b7a0b | 4169 | return &bfqg->async_bfqq[1][ioprio]; |
aee69d78 | 4170 | case IOPRIO_CLASS_IDLE: |
e21b7a0b | 4171 | return &bfqg->async_idle_bfqq; |
aee69d78 PV |
4172 | default: |
4173 | return NULL; | |
4174 | } | |
4175 | } | |
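/*
 * Editorial note, not in the original source: unlike sync queues,
 * which are per-process, async queues are shared: the lookup above
 * returns the per-(group, ioprio class, ioprio level) slot stored in
 * the bfq_group, and bfq_get_queue() below reuses the same bfq_queue
 * for every process issuing async I/O with a matching priority.
 */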
4176 | ||
4177 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, | |
4178 | struct bio *bio, bool is_sync, | |
4179 | struct bfq_io_cq *bic) | |
4180 | { | |
4181 | const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); | |
4182 | const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); | |
4183 | struct bfq_queue **async_bfqq = NULL; | |
4184 | struct bfq_queue *bfqq; | |
e21b7a0b | 4185 | struct bfq_group *bfqg; |
aee69d78 PV |
4186 | |
4187 | rcu_read_lock(); | |
4188 | ||
e21b7a0b AA |
4189 | bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); |
4190 | if (!bfqg) { | |
4191 | bfqq = &bfqd->oom_bfqq; | |
4192 | goto out; | |
4193 | } | |
4194 | ||
aee69d78 | 4195 | if (!is_sync) { |
e21b7a0b | 4196 | async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, |
aee69d78 PV |
4197 | ioprio); |
4198 | bfqq = *async_bfqq; | |
4199 | if (bfqq) | |
4200 | goto out; | |
4201 | } | |
4202 | ||
4203 | bfqq = kmem_cache_alloc_node(bfq_pool, | |
4204 | GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, | |
4205 | bfqd->queue->node); | |
4206 | ||
4207 | if (bfqq) { | |
4208 | bfq_init_bfqq(bfqd, bfqq, bic, current->pid, | |
4209 | is_sync); | |
e21b7a0b | 4210 | bfq_init_entity(&bfqq->entity, bfqg); |
aee69d78 PV |
4211 | bfq_log_bfqq(bfqd, bfqq, "allocated"); |
4212 | } else { | |
4213 | bfqq = &bfqd->oom_bfqq; | |
4214 | bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); | |
4215 | goto out; | |
4216 | } | |
4217 | ||
4218 | /* | |
4219 | * Pin the queue now that it's allocated, scheduler exit will | |
4220 | * prune it. | |
4221 | */ | |
4222 | if (async_bfqq) { | |
e21b7a0b AA |
4223 | bfqq->ref++; /* |
4224 | * Extra group reference, w.r.t. sync | |
4225 | * queue. This extra reference is removed | |
4226 | * only if bfqq->bfqg disappears, to | |
4227 | * guarantee that this queue is not freed | |
4228 | * until its group goes away. | |
4229 | */ | |
4230 | bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", | |
aee69d78 PV |
4231 | bfqq, bfqq->ref); |
4232 | *async_bfqq = bfqq; | |
4233 | } | |
4234 | ||
4235 | out: | |
4236 | bfqq->ref++; /* get a process reference to this queue */ | |
4237 | bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); | |
4238 | rcu_read_unlock(); | |
4239 | return bfqq; | |
4240 | } | |
4241 | ||
4242 | static void bfq_update_io_thinktime(struct bfq_data *bfqd, | |
4243 | struct bfq_queue *bfqq) | |
4244 | { | |
4245 | struct bfq_ttime *ttime = &bfqq->ttime; | |
4246 | u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; | |
4247 | ||
4248 | elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); | |
4249 | ||
4250 | ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8; | |
4251 | ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); | |
4252 | ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, | |
4253 | ttime->ttime_samples); | |
4254 | } | |
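/*
 * Editorial note, not in the original source: the two updates above
 * implement a fixed-point exponential moving average with decay
 * factor 7/8, where 256 plays the role of "1":
 *
 *   samples' = (7 * samples + 256) / 8
 *   total'   = (7 * total + 256 * elapsed) / 8
 *   mean     = (total + 128) / samples    (+128 rounds to nearest)
 *
 * With a constant think time E (elapsed is first capped at
 * 2 * bfq_slice_idle by the min_t() above), samples converges to 256
 * and total to 256 * E, so ttime_mean converges to E, in nanoseconds.
 */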
4255 | ||
4256 | static void | |
4257 | bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, | |
4258 | struct request *rq) | |
4259 | { | |
aee69d78 | 4260 | bfqq->seek_history <<= 1; |
ab0e43e9 PV |
4261 | bfqq->seek_history |= |
4262 | get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && | |
aee69d78 PV |
4263 | (!blk_queue_nonrot(bfqd->queue) || |
4264 | blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); | |
4265 | } | |
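/*
 * Editorial note, not in the original source: seek_history is used as
 * a shift register of per-request "seekiness" flags: each new request
 * shifts in one bit, set iff the distance from the previous request
 * exceeds BFQQ_SEEK_THR; on non-rotational devices the bit is set
 * only if the request is also small, because large requests still
 * reach a high throughput there even when non-sequential. BFQQ_SEEKY(),
 * defined elsewhere in BFQ, then classifies the queue by looking at
 * how many bits are set in this window.
 */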
4266 | ||
d5be3fef PV |
4267 | static void bfq_update_has_short_ttime(struct bfq_data *bfqd, |
4268 | struct bfq_queue *bfqq, | |
4269 | struct bfq_io_cq *bic) | |
aee69d78 | 4270 | { |
d5be3fef | 4271 | bool has_short_ttime = true; |
aee69d78 | 4272 | |
d5be3fef PV |
4273 | /* |
4274 | * No need to update has_short_ttime if bfqq is async or in | |
4275 | * idle io prio class, or if bfq_slice_idle is zero, because | |
4276 | * no device idling is performed for bfqq in this case. | |
4277 | */ | |
4278 | if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || | |
4279 | bfqd->bfq_slice_idle == 0) | |
aee69d78 PV |
4280 | return; |
4281 | ||
36eca894 AA |
4282 | /* Idle window just restored, statistics are meaningless. */ |
4283 | if (time_is_after_eq_jiffies(bfqq->split_time + | |
4284 | bfqd->bfq_wr_min_idle_time)) | |
4285 | return; | |
4286 | ||
d5be3fef PV |
4287 | /* Think time is infinite if no process is linked to |
4288 | * bfqq. Otherwise check average think time to | |
4289 | * decide whether to mark as has_short_ttime | |
4290 | */ | |
aee69d78 | 4291 | if (atomic_read(&bic->icq.ioc->active_ref) == 0 || |
d5be3fef PV |
4292 | (bfq_sample_valid(bfqq->ttime.ttime_samples) && |
4293 | bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) | |
4294 | has_short_ttime = false; | |
4295 | ||
4296 | bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", | |
4297 | has_short_ttime); | |
aee69d78 | 4298 | |
d5be3fef PV |
4299 | if (has_short_ttime) |
4300 | bfq_mark_bfqq_has_short_ttime(bfqq); | |
aee69d78 | 4301 | else |
d5be3fef | 4302 | bfq_clear_bfqq_has_short_ttime(bfqq); |
aee69d78 PV |
4303 | } |
4304 | ||
4305 | /* | |
4306 | * Called when a new fs request (rq) is added to bfqq. Check if there's | |
4307 | * something we should do about it. | |
4308 | */ | |
4309 | static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, | |
4310 | struct request *rq) | |
4311 | { | |
4312 | struct bfq_io_cq *bic = RQ_BIC(rq); | |
4313 | ||
4314 | if (rq->cmd_flags & REQ_META) | |
4315 | bfqq->meta_pending++; | |
4316 | ||
4317 | bfq_update_io_thinktime(bfqd, bfqq); | |
d5be3fef | 4318 | bfq_update_has_short_ttime(bfqd, bfqq, bic); |
aee69d78 | 4319 | bfq_update_io_seektime(bfqd, bfqq, rq); |
aee69d78 PV |
4320 | |
4321 | bfq_log_bfqq(bfqd, bfqq, | |
d5be3fef PV |
4322 | "rq_enqueued: has_short_ttime=%d (seeky %d)", |
4323 | bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); | |
aee69d78 PV |
4324 | |
4325 | bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); | |
4326 | ||
4327 | if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { | |
4328 | bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && | |
4329 | blk_rq_sectors(rq) < 32; | |
4330 | bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); | |
4331 | ||
4332 | /* | |
4333 | * There is just this request queued: if the request | |
4334 | * is small and the queue is not to be expired, then | |
4335 | * just exit. | |
4336 | * | |
4337 | * In this way, if the device is being idled to wait | |
4338 | * for a new request from the in-service queue, we | |
4339 | * avoid unplugging the device and committing the | |
4340 | * device to serve just a small request. On the | |
4341 | * contrary, we wait for the block layer to decide | |
4342 | * when to unplug the device: hopefully, new requests | |
4343 | * will be merged to this one quickly, then the device | |
4344 | * will be unplugged and larger requests will be | |
4345 | * dispatched. | |
4346 | */ | |
4347 | if (small_req && !budget_timeout) | |
4348 | return; | |
4349 | ||
4350 | /* | |
4351 | * A large enough request arrived, or the queue is to | |
4352 | * be expired: in both cases disk idling is to be | |
4353 | * stopped, so clear wait_request flag and reset | |
4354 | * timer. | |
4355 | */ | |
4356 | bfq_clear_bfqq_wait_request(bfqq); | |
4357 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); | |
4358 | ||
4359 | /* | |
4360 | * The queue is not empty, because a new request just | |
4361 | * arrived. Hence we can safely expire the queue, in | |
4362 | * case of budget timeout, without risking that the | |
4363 | * timestamps of the queue are not updated correctly. | |
4364 | * See [1] for more details. | |
4365 | */ | |
4366 | if (budget_timeout) | |
4367 | bfq_bfqq_expire(bfqd, bfqq, false, | |
4368 | BFQQE_BUDGET_TIMEOUT); | |
4369 | } | |
4370 | } | |
4371 | ||
24bfd19b PV |
4372 | /* returns true if it causes the idle timer to be disabled */ |
4373 | static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) | |
aee69d78 | 4374 | { |
36eca894 AA |
4375 | struct bfq_queue *bfqq = RQ_BFQQ(rq), |
4376 | *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); | |
24bfd19b | 4377 | bool waiting, idle_timer_disabled = false; |
36eca894 AA |
4378 | |
4379 | if (new_bfqq) { | |
4380 | if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) | |
4381 | new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); | |
4382 | /* | |
4383 | * Release the request's reference to the old bfqq | |
4384 | * and make sure one is taken to the shared queue. | |
4385 | */ | |
4386 | new_bfqq->allocated++; | |
4387 | bfqq->allocated--; | |
4388 | new_bfqq->ref++; | |
4389 | /* | |
4390 | * If the bic associated with the process | |
4391 | * issuing this request still points to bfqq | |
4392 | * (and thus has not been already redirected | |
4393 | * to new_bfqq or even some other bfq_queue), | |
4394 | * then complete the merge and redirect it to | |
4395 | * new_bfqq. | |
4396 | */ | |
4397 | if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) | |
4398 | bfq_merge_bfqqs(bfqd, RQ_BIC(rq), | |
4399 | bfqq, new_bfqq); | |
894df937 PV |
4400 | |
4401 | bfq_clear_bfqq_just_created(bfqq); | |
36eca894 AA |
4402 | /* |
4403 | * rq is about to be enqueued into new_bfqq, | |
4404 | * release rq reference on bfqq | |
4405 | */ | |
4406 | bfq_put_queue(bfqq); | |
4407 | rq->elv.priv[1] = new_bfqq; | |
4408 | bfqq = new_bfqq; | |
4409 | } | |
aee69d78 | 4410 | |
24bfd19b | 4411 | waiting = bfqq && bfq_bfqq_wait_request(bfqq); |
aee69d78 | 4412 | bfq_add_request(rq); |
24bfd19b | 4413 | idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq); |
aee69d78 PV |
4414 | |
4415 | rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; | |
4416 | list_add_tail(&rq->queuelist, &bfqq->fifo); | |
4417 | ||
4418 | bfq_rq_enqueued(bfqd, bfqq, rq); | |
24bfd19b PV |
4419 | |
4420 | return idle_timer_disabled; | |
aee69d78 PV |
4421 | } |
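/*
 * Editorial note, not in the original source: idle_timer_disabled is
 * computed by sampling the wait_request flag of bfqq around
 * bfq_add_request(): if the queue was waiting for a request and is no
 * longer waiting afterwards, then the enqueue path (see
 * bfq_rq_enqueued) has disabled the idle timer. The caller uses this
 * return value to update the cgroup statistics outside the scheduler
 * lock.
 */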
4422 | ||
9b25bd03 PV |
4423 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) |
4424 | static void bfq_update_insert_stats(struct request_queue *q, | |
4425 | struct bfq_queue *bfqq, | |
4426 | bool idle_timer_disabled, | |
4427 | unsigned int cmd_flags) | |
4428 | { | |
4429 | if (!bfqq) | |
4430 | return; | |
4431 | ||
4432 | /* | |
4433 | * bfqq still exists, because it can disappear only after | |
4434 | * either it is merged with another queue, or the process it | |
4435 | * is associated with exits. But both actions must be taken by | |
4436 | * the same process currently executing this flow of | |
4437 | * instructions. | |
4438 | * | |
4439 | * In addition, the following queue lock guarantees that | |
4440 | * bfqq_group(bfqq) exists as well. | |
4441 | */ | |
4442 | spin_lock_irq(q->queue_lock); | |
4443 | bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags); | |
4444 | if (idle_timer_disabled) | |
4445 | bfqg_stats_update_idle_time(bfqq_group(bfqq)); | |
4446 | spin_unlock_irq(q->queue_lock); | |
4447 | } | |
4448 | #else | |
4449 | static inline void bfq_update_insert_stats(struct request_queue *q, | |
4450 | struct bfq_queue *bfqq, | |
4451 | bool idle_timer_disabled, | |
4452 | unsigned int cmd_flags) {} | |
4453 | #endif | |
4454 | ||
aee69d78 PV |
4455 | static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, |
4456 | bool at_head) | |
4457 | { | |
4458 | struct request_queue *q = hctx->queue; | |
4459 | struct bfq_data *bfqd = q->elevator->elevator_data; | |
614822f8 | 4460 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
24bfd19b PV |
4461 | bool idle_timer_disabled = false; |
4462 | unsigned int cmd_flags; | |
aee69d78 PV |
4463 | |
4464 | spin_lock_irq(&bfqd->lock); | |
4465 | if (blk_mq_sched_try_insert_merge(q, rq)) { | |
4466 | spin_unlock_irq(&bfqd->lock); | |
4467 | return; | |
4468 | } | |
4469 | ||
4470 | spin_unlock_irq(&bfqd->lock); | |
4471 | ||
4472 | blk_mq_sched_request_inserted(rq); | |
4473 | ||
4474 | spin_lock_irq(&bfqd->lock); | |
4475 | if (at_head || blk_rq_is_passthrough(rq)) { | |
4476 | if (at_head) | |
4477 | list_add(&rq->queuelist, &bfqd->dispatch); | |
4478 | else | |
4479 | list_add_tail(&rq->queuelist, &bfqd->dispatch); | |
4480 | } else { | |
24bfd19b | 4481 | idle_timer_disabled = __bfq_insert_request(bfqd, rq); |
614822f8 LM |
4482 | /* |
4483 | * Update bfqq, because, if a queue merge has occurred | |
4484 | * in __bfq_insert_request, then rq has been | |
4485 | * redirected into a new queue. | |
4486 | */ | |
4487 | bfqq = RQ_BFQQ(rq); | |
aee69d78 PV |
4488 | |
4489 | if (rq_mergeable(rq)) { | |
4490 | elv_rqhash_add(q, rq); | |
4491 | if (!q->last_merge) | |
4492 | q->last_merge = rq; | |
4493 | } | |
4494 | } | |
4495 | ||
24bfd19b PV |
4496 | /* |
4497 | * Cache cmd_flags before releasing scheduler lock, because rq | |
4498 | * may disappear afterwards (for example, because of a request | |
4499 | * merge). | |
4500 | */ | |
4501 | cmd_flags = rq->cmd_flags; | |
9b25bd03 | 4502 | |
6fa3e8d3 | 4503 | spin_unlock_irq(&bfqd->lock); |
24bfd19b | 4504 | |
9b25bd03 PV |
4505 | bfq_update_insert_stats(q, bfqq, idle_timer_disabled, |
4506 | cmd_flags); | |
aee69d78 PV |
4507 | } |
4508 | ||
4509 | static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, | |
4510 | struct list_head *list, bool at_head) | |
4511 | { | |
4512 | while (!list_empty(list)) { | |
4513 | struct request *rq; | |
4514 | ||
4515 | rq = list_first_entry(list, struct request, queuelist); | |
4516 | list_del_init(&rq->queuelist); | |
4517 | bfq_insert_request(hctx, rq, at_head); | |
4518 | } | |
4519 | } | |
4520 | ||
4521 | static void bfq_update_hw_tag(struct bfq_data *bfqd) | |
4522 | { | |
4523 | bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, | |
4524 | bfqd->rq_in_driver); | |
4525 | ||
4526 | if (bfqd->hw_tag == 1) | |
4527 | return; | |
4528 | ||
4529 | /* | |
4530 | * This sample is valid if the number of outstanding requests | |
4531 | * is large enough to allow a queueing behavior. Note that the | |
4532 | * sum is not exact, as it's not taking into account deactivated | |
4533 | * requests. | |
4534 | */ | |
4535 | if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) | |
4536 | return; | |
4537 | ||
4538 | if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) | |
4539 | return; | |
4540 | ||
4541 | bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; | |
4542 | bfqd->max_rq_in_driver = 0; | |
4543 | bfqd->hw_tag_samples = 0; | |
4544 | } | |
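/*
 * Editorial note, not in the original source: hw_tag starts at -1
 * ("unknown", set in bfq_init_queue below). The function above
 * collects BFQ_HW_QUEUE_SAMPLES valid samples and then sets hw_tag to
 * 1 if the device ever kept more than BFQ_HW_QUEUE_THRESHOLD requests
 * in flight (i.e., it really queues commands internally, NCQ-style),
 * and to 0 otherwise. Once hw_tag is 1 the sampling stops, while a
 * value of 0 keeps being re-evaluated.
 */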
4545 | ||
4546 | static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) | |
4547 | { | |
ab0e43e9 PV |
4548 | u64 now_ns; |
4549 | u32 delta_us; | |
4550 | ||
aee69d78 PV |
4551 | bfq_update_hw_tag(bfqd); |
4552 | ||
4553 | bfqd->rq_in_driver--; | |
4554 | bfqq->dispatched--; | |
4555 | ||
44e44a1b PV |
4556 | if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { |
4557 | /* | |
4558 | * Set budget_timeout (which we overload to store the | |
4559 | * time at which the queue remains with no backlog and | |
4560 | * no outstanding request; used by the weight-raising | |
4561 | * mechanism). | |
4562 | */ | |
4563 | bfqq->budget_timeout = jiffies; | |
1de0c4cd AA |
4564 | |
4565 | bfq_weights_tree_remove(bfqd, &bfqq->entity, | |
4566 | &bfqd->queue_weights_tree); | |
44e44a1b PV |
4567 | } |
4568 | ||
ab0e43e9 PV |
4569 | now_ns = ktime_get_ns(); |
4570 | ||
4571 | bfqq->ttime.last_end_request = now_ns; | |
4572 | ||
4573 | /* | |
4574 | * Using us instead of ns, to get a reasonable precision in | |
4575 | * computing rate in next check. | |
4576 | */ | |
4577 | delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); | |
4578 | ||
4579 | /* | |
4580 | * If the request took rather long to complete, and, according | |
4581 | * to the maximum request size recorded, this completion latency | |
4582 | * implies that the request was certainly served at a very low | |
4583 | * rate (less than 1M sectors/sec), then the whole observation | |
4584 | * interval that lasts up to this time instant cannot be a | |
4585 | * valid time interval for computing a new peak rate. Invoke | |
4586 | * bfq_update_rate_reset to have the following three steps | |
4587 | * taken: | |
4588 | * - close the observation interval at the last (previous) | |
4589 | * request dispatch or completion | |
4590 | * - compute rate, if possible, for that observation interval | |
4591 | * - reset to zero samples, which will trigger a proper | |
4592 | * re-initialization of the observation interval on next | |
4593 | * dispatch | |
4594 | */ | |
4595 | if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && | |
4596 | (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < | |
4597 | 1UL<<(BFQ_RATE_SHIFT - 10)) | |
4598 | bfq_update_rate_reset(bfqd, NULL); | |
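/*
 * Editorial note, not in the original source: in the check above both
 * sides carry the 2^BFQ_RATE_SHIFT fixed-point scaling factor, so it
 * effectively compares size/delta_us, a rate in sectors/usec, against
 * 2^-10 sectors/usec; observation intervals whose apparent rate falls
 * below that bound are discarded by resetting the rate sampling.
 */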
4599 | bfqd->last_completion = now_ns; | |
aee69d78 | 4600 | |
77b7dcea PV |
4601 | /* |
4602 | * If we are waiting to discover whether the request pattern | |
4603 | * of the task associated with the queue is actually | |
4604 | * isochronous, and both requisites for this condition to hold | |
4605 | * are now satisfied, then compute soft_rt_next_start (see the | |
4606 | * comments on the function bfq_bfqq_softrt_next_start()). We | |
4607 | * schedule this delayed check when bfqq expires, if it still | |
4608 | * has in-flight requests. | |
4609 | */ | |
4610 | if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && | |
4611 | RB_EMPTY_ROOT(&bfqq->sort_list)) | |
4612 | bfqq->soft_rt_next_start = | |
4613 | bfq_bfqq_softrt_next_start(bfqd, bfqq); | |
4614 | ||
aee69d78 PV |
4615 | /* |
4616 | * If this is the in-service queue, check if it needs to be expired, | |
4617 | * or if we want to idle in case it has no pending requests. | |
4618 | */ | |
4619 | if (bfqd->in_service_queue == bfqq) { | |
44e44a1b | 4620 | if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) { |
aee69d78 PV |
4621 | bfq_arm_slice_timer(bfqd); |
4622 | return; | |
4623 | } else if (bfq_may_expire_for_budg_timeout(bfqq)) | |
4624 | bfq_bfqq_expire(bfqd, bfqq, false, | |
4625 | BFQQE_BUDGET_TIMEOUT); | |
4626 | else if (RB_EMPTY_ROOT(&bfqq->sort_list) && | |
4627 | (bfqq->dispatched == 0 || | |
4628 | !bfq_bfqq_may_idle(bfqq))) | |
4629 | bfq_bfqq_expire(bfqd, bfqq, false, | |
4630 | BFQQE_NO_MORE_REQUESTS); | |
4631 | } | |
3f7cb4f4 HT |
4632 | |
4633 | if (!bfqd->rq_in_driver) | |
4634 | bfq_schedule_dispatch(bfqd); | |
aee69d78 PV |
4635 | } |
4636 | ||
8993d445 | 4637 | static void bfq_finish_request_body(struct bfq_queue *bfqq) |
aee69d78 PV |
4638 | { |
4639 | bfqq->allocated--; | |
4640 | ||
4641 | bfq_put_queue(bfqq); | |
4642 | } | |
4643 | ||
7b9e9361 | 4644 | static void bfq_finish_request(struct request *rq) |
aee69d78 | 4645 | { |
5bbf4e5a CH |
4646 | struct bfq_queue *bfqq; |
4647 | struct bfq_data *bfqd; | |
4648 | ||
4649 | if (!rq->elv.icq) | |
4650 | return; | |
4651 | ||
4652 | bfqq = RQ_BFQQ(rq); | |
4653 | bfqd = bfqq->bfqd; | |
aee69d78 | 4654 | |
e21b7a0b AA |
4655 | if (rq->rq_flags & RQF_STARTED) |
4656 | bfqg_stats_update_completion(bfqq_group(bfqq), | |
4657 | rq_start_time_ns(rq), | |
4658 | rq_io_start_time_ns(rq), | |
4659 | rq->cmd_flags); | |
aee69d78 PV |
4660 | |
4661 | if (likely(rq->rq_flags & RQF_STARTED)) { | |
4662 | unsigned long flags; | |
4663 | ||
4664 | spin_lock_irqsave(&bfqd->lock, flags); | |
4665 | ||
4666 | bfq_completed_request(bfqq, bfqd); | |
8993d445 | 4667 | bfq_finish_request_body(bfqq); |
aee69d78 | 4668 | |
6fa3e8d3 | 4669 | spin_unlock_irqrestore(&bfqd->lock, flags); |
aee69d78 PV |
4670 | } else { |
4671 | /* | |
4672 | * Request rq may be still/already in the scheduler, | |
4673 | * in which case we need to remove it. And we cannot | |
4674 | * defer such a check and removal, to avoid | |
4675 | * inconsistencies in the time interval from the end | |
4676 | * of this function to the start of the deferred work. | |
4677 | * This situation seems to occur only in process | |
4678 | * context, as a consequence of a merge. In the | |
4679 | * current version of the code, this implies that the | |
4680 | * scheduler lock is held. | |
4681 | */ | |
4682 | ||
614822f8 | 4683 | if (!RB_EMPTY_NODE(&rq->rb_node)) { |
7b9e9361 | 4684 | bfq_remove_request(rq->q, rq); |
614822f8 LM |
4685 | bfqg_stats_update_io_remove(bfqq_group(bfqq), |
4686 | rq->cmd_flags); | |
4687 | } | |
8993d445 | 4688 | bfq_finish_request_body(bfqq); |
aee69d78 PV |
4689 | } |
4690 | ||
4691 | rq->elv.priv[0] = NULL; | |
4692 | rq->elv.priv[1] = NULL; | |
4693 | } | |
4694 | ||
36eca894 AA |
4695 | /* |
4696 | * Returns NULL if a new bfqq should be allocated, or the old bfqq if this | |
4697 | * was the last process referring to that bfqq. | |
4698 | */ | |
4699 | static struct bfq_queue * | |
4700 | bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) | |
4701 | { | |
4702 | bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); | |
4703 | ||
4704 | if (bfqq_process_refs(bfqq) == 1) { | |
4705 | bfqq->pid = current->pid; | |
4706 | bfq_clear_bfqq_coop(bfqq); | |
4707 | bfq_clear_bfqq_split_coop(bfqq); | |
4708 | return bfqq; | |
4709 | } | |
4710 | ||
4711 | bic_set_bfqq(bic, NULL, 1); | |
4712 | ||
4713 | bfq_put_cooperator(bfqq); | |
4714 | ||
4715 | bfq_put_queue(bfqq); | |
4716 | return NULL; | |
4717 | } | |
4718 | ||
4719 | static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, | |
4720 | struct bfq_io_cq *bic, | |
4721 | struct bio *bio, | |
4722 | bool split, bool is_sync, | |
4723 | bool *new_queue) | |
4724 | { | |
4725 | struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); | |
4726 | ||
4727 | if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) | |
4728 | return bfqq; | |
4729 | ||
4730 | if (new_queue) | |
4731 | *new_queue = true; | |
4732 | ||
4733 | if (bfqq) | |
4734 | bfq_put_queue(bfqq); | |
4735 | bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); | |
4736 | ||
4737 | bic_set_bfqq(bic, bfqq, is_sync); | |
e1b2324d AA |
4738 | if (split && is_sync) { |
4739 | if ((bic->was_in_burst_list && bfqd->large_burst) || | |
4740 | bic->saved_in_large_burst) | |
4741 | bfq_mark_bfqq_in_large_burst(bfqq); | |
4742 | else { | |
4743 | bfq_clear_bfqq_in_large_burst(bfqq); | |
4744 | if (bic->was_in_burst_list) | |
99fead8d PV |
4745 | /* |
4746 | * If bfqq was in the current | |
4747 | * burst list before being | |
4748 | * merged, then we have to add | |
4749 | * it back. And we do not need | |
4750 | * to increase burst_size, as | |
4751 | * we did not decrement | |
4752 | * burst_size when we removed | |
4753 | * bfqq from the burst list as | |
4754 | * a consequence of a merge | |
4755 | * (see comments in | |
4756 | * bfq_put_queue). In this | |
4757 | * respect, it would be rather | |
4758 | * costly to know whether the | |
4759 | * current burst list is still | |
4760 | * the same burst list from | |
4761 | * which bfqq was removed on | |
4762 | * the merge. To avoid this | |
4763 | * cost, if bfqq was in a | |
4764 | * burst list, then we add | |
4765 | * bfqq to the current burst | |
4766 | * list without any further | |
4767 | * check. This can cause | |
4768 | * inappropriate insertions, | |
4769 | * but rarely enough to not | |
4770 | * harm the detection of large | |
4771 | * bursts significantly. | |
4772 | */ | |
e1b2324d AA |
4773 | hlist_add_head(&bfqq->burst_list_node, |
4774 | &bfqd->burst_list); | |
4775 | } | |
36eca894 | 4776 | bfqq->split_time = jiffies; |
e1b2324d | 4777 | } |
36eca894 AA |
4778 | |
4779 | return bfqq; | |
4780 | } | |
4781 | ||
aee69d78 PV |
4782 | /* |
4783 | * Allocate bfq data structures associated with this request. | |
4784 | */ | |
5bbf4e5a | 4785 | static void bfq_prepare_request(struct request *rq, struct bio *bio) |
aee69d78 | 4786 | { |
5bbf4e5a | 4787 | struct request_queue *q = rq->q; |
aee69d78 | 4788 | struct bfq_data *bfqd = q->elevator->elevator_data; |
9f210738 | 4789 | struct bfq_io_cq *bic; |
aee69d78 PV |
4790 | const int is_sync = rq_is_sync(rq); |
4791 | struct bfq_queue *bfqq; | |
36eca894 | 4792 | bool new_queue = false; |
13c931bd | 4793 | bool bfqq_already_existing = false, split = false; |
aee69d78 | 4794 | |
9f210738 | 4795 | if (!rq->elv.icq) |
5bbf4e5a | 4796 | return; |
9f210738 | 4797 | bic = icq_to_bic(rq->elv.icq); |
aee69d78 | 4798 | |
9f210738 | 4799 | spin_lock_irq(&bfqd->lock); |
aee69d78 | 4800 | |
8c9ff1ad CIK |
4801 | bfq_check_ioprio_change(bic, bio); |
4802 | ||
e21b7a0b AA |
4803 | bfq_bic_update_cgroup(bic, bio); |
4804 | ||
36eca894 AA |
4805 | bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, |
4806 | &new_queue); | |
4807 | ||
4808 | if (likely(!new_queue)) { | |
4809 | /* If the queue was seeky for too long, break it apart. */ | |
4810 | if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { | |
4811 | bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); | |
e1b2324d AA |
4812 | |
4813 | /* Update bic before losing reference to bfqq */ | |
4814 | if (bfq_bfqq_in_large_burst(bfqq)) | |
4815 | bic->saved_in_large_burst = true; | |
4816 | ||
36eca894 | 4817 | bfqq = bfq_split_bfqq(bic, bfqq); |
6fa3e8d3 | 4818 | split = true; |
36eca894 AA |
4819 | |
4820 | if (!bfqq) | |
4821 | bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, | |
4822 | true, is_sync, | |
4823 | NULL); | |
13c931bd PV |
4824 | else |
4825 | bfqq_already_existing = true; | |
36eca894 | 4826 | } |
aee69d78 PV |
4827 | } |
4828 | ||
4829 | bfqq->allocated++; | |
4830 | bfqq->ref++; | |
4831 | bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", | |
4832 | rq, bfqq, bfqq->ref); | |
4833 | ||
4834 | rq->elv.priv[0] = bic; | |
4835 | rq->elv.priv[1] = bfqq; | |
4836 | ||
36eca894 AA |
4837 | /* |
4838 | * If a bfq_queue has only one process reference, it is owned | |
4839 | * by only this bic: we can then set bfqq->bic = bic. In | |
4840 | * addition, if the queue has also just been split, we have to | |
4841 | * resume its state. | |
4842 | */ | |
4843 | if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { | |
4844 | bfqq->bic = bic; | |
6fa3e8d3 | 4845 | if (split) { |
36eca894 AA |
4846 | /* |
4847 | * The queue has just been split from a shared | |
4848 | * queue: restore the idle window and the | |
4849 | * possible weight raising period. | |
4850 | */ | |
13c931bd PV |
4851 | bfq_bfqq_resume_state(bfqq, bfqd, bic, |
4852 | bfqq_already_existing); | |
36eca894 AA |
4853 | } |
4854 | } | |
4855 | ||
e1b2324d AA |
4856 | if (unlikely(bfq_bfqq_just_created(bfqq))) |
4857 | bfq_handle_burst(bfqd, bfqq); | |
4858 | ||
6fa3e8d3 | 4859 | spin_unlock_irq(&bfqd->lock); |
aee69d78 PV |
4860 | } |
4861 | ||
4862 | static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) | |
4863 | { | |
4864 | struct bfq_data *bfqd = bfqq->bfqd; | |
4865 | enum bfqq_expiration reason; | |
4866 | unsigned long flags; | |
4867 | ||
4868 | spin_lock_irqsave(&bfqd->lock, flags); | |
4869 | bfq_clear_bfqq_wait_request(bfqq); | |
4870 | ||
4871 | if (bfqq != bfqd->in_service_queue) { | |
4872 | spin_unlock_irqrestore(&bfqd->lock, flags); | |
4873 | return; | |
4874 | } | |
4875 | ||
4876 | if (bfq_bfqq_budget_timeout(bfqq)) | |
4877 | /* | |
4878 | * Also here the queue can be safely expired | |
4879 | * for budget timeout without wasting | |
4880 | * guarantees | |
4881 | */ | |
4882 | reason = BFQQE_BUDGET_TIMEOUT; | |
4883 | else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) | |
4884 | /* | |
4885 | * The queue may not be empty upon timer expiration, | |
4886 | * because we may not disable the timer when the | |
4887 | * first request of the in-service queue arrives | |
4888 | * during disk idling. | |
4889 | */ | |
4890 | reason = BFQQE_TOO_IDLE; | |
4891 | else | |
4892 | goto schedule_dispatch; | |
4893 | ||
4894 | bfq_bfqq_expire(bfqd, bfqq, true, reason); | |
4895 | ||
4896 | schedule_dispatch: | |
6fa3e8d3 | 4897 | spin_unlock_irqrestore(&bfqd->lock, flags); |
aee69d78 PV |
4898 | bfq_schedule_dispatch(bfqd); |
4899 | } | |
4900 | ||
4901 | /* | |
4902 | * Handler of the expiration of the timer running if the in-service queue | |
4903 | * is idling inside its time slice. | |
4904 | */ | |
4905 | static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) | |
4906 | { | |
4907 | struct bfq_data *bfqd = container_of(timer, struct bfq_data, | |
4908 | idle_slice_timer); | |
4909 | struct bfq_queue *bfqq = bfqd->in_service_queue; | |
4910 | ||
4911 | /* | |
4912 | * Theoretical race here: the in-service queue can be NULL or | |
4913 | * different from the queue that was idling if a new request | |
4914 | * arrives for the current queue and there is a full dispatch | |
4915 | * cycle that changes the in-service queue. This can hardly | |
4916 | * happen, but in the worst case we just expire a queue too | |
4917 | * early. | |
4918 | */ | |
4919 | if (bfqq) | |
4920 | bfq_idle_slice_timer_body(bfqq); | |
4921 | ||
4922 | return HRTIMER_NORESTART; | |
4923 | } | |
4924 | ||
4925 | static void __bfq_put_async_bfqq(struct bfq_data *bfqd, | |
4926 | struct bfq_queue **bfqq_ptr) | |
4927 | { | |
4928 | struct bfq_queue *bfqq = *bfqq_ptr; | |
4929 | ||
4930 | bfq_log(bfqd, "put_async_bfqq: %p", bfqq); | |
4931 | if (bfqq) { | |
e21b7a0b AA |
4932 | bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); |
4933 | ||
aee69d78 PV |
4934 | bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", |
4935 | bfqq, bfqq->ref); | |
4936 | bfq_put_queue(bfqq); | |
4937 | *bfqq_ptr = NULL; | |
4938 | } | |
4939 | } | |
4940 | ||
4941 | /* | |
e21b7a0b AA |
4942 | * Release all the bfqg references to its async queues. If we are |
4943 | * deallocating the group these queues may still contain requests, so | |
4944 | * we reparent them to the root cgroup (i.e., the only one that will | |
4945 | * exist for sure until all the requests on a device are gone). | |
aee69d78 | 4946 | */ |
ea25da48 | 4947 | void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) |
aee69d78 PV |
4948 | { |
4949 | int i, j; | |
4950 | ||
4951 | for (i = 0; i < 2; i++) | |
4952 | for (j = 0; j < IOPRIO_BE_NR; j++) | |
e21b7a0b | 4953 | __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); |
aee69d78 | 4954 | |
e21b7a0b | 4955 | __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); |
aee69d78 PV |
4956 | } |
4957 | ||
4958 | static void bfq_exit_queue(struct elevator_queue *e) | |
4959 | { | |
4960 | struct bfq_data *bfqd = e->elevator_data; | |
4961 | struct bfq_queue *bfqq, *n; | |
4962 | ||
4963 | hrtimer_cancel(&bfqd->idle_slice_timer); | |
4964 | ||
4965 | spin_lock_irq(&bfqd->lock); | |
4966 | list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) | |
e21b7a0b | 4967 | bfq_deactivate_bfqq(bfqd, bfqq, false, false); |
aee69d78 PV |
4968 | spin_unlock_irq(&bfqd->lock); |
4969 | ||
4970 | hrtimer_cancel(&bfqd->idle_slice_timer); | |
4971 | ||
8abef10b | 4972 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
0d52af59 PV |
4973 | /* release oom-queue reference to root group */ |
4974 | bfqg_and_blkg_put(bfqd->root_group); | |
4975 | ||
e21b7a0b AA |
4976 | blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); |
4977 | #else | |
4978 | spin_lock_irq(&bfqd->lock); | |
4979 | bfq_put_async_queues(bfqd, bfqd->root_group); | |
4980 | kfree(bfqd->root_group); | |
4981 | spin_unlock_irq(&bfqd->lock); | |
4982 | #endif | |
4983 | ||
aee69d78 PV |
4984 | kfree(bfqd); |
4985 | } | |
4986 | ||
e21b7a0b AA |
4987 | static void bfq_init_root_group(struct bfq_group *root_group, |
4988 | struct bfq_data *bfqd) | |
4989 | { | |
4990 | int i; | |
4991 | ||
4992 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | |
4993 | root_group->entity.parent = NULL; | |
4994 | root_group->my_entity = NULL; | |
4995 | root_group->bfqd = bfqd; | |
4996 | #endif | |
36eca894 | 4997 | root_group->rq_pos_tree = RB_ROOT; |
e21b7a0b AA |
4998 | for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) |
4999 | root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; | |
5000 | root_group->sched_data.bfq_class_idle_last_service = jiffies; | |
5001 | } | |
5002 | ||
aee69d78 PV |
5003 | static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) |
5004 | { | |
5005 | struct bfq_data *bfqd; | |
5006 | struct elevator_queue *eq; | |
aee69d78 PV |
5007 | |
5008 | eq = elevator_alloc(q, e); | |
5009 | if (!eq) | |
5010 | return -ENOMEM; | |
5011 | ||
5012 | bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); | |
5013 | if (!bfqd) { | |
5014 | kobject_put(&eq->kobj); | |
5015 | return -ENOMEM; | |
5016 | } | |
5017 | eq->elevator_data = bfqd; | |
5018 | ||
e21b7a0b AA |
5019 | spin_lock_irq(q->queue_lock); |
5020 | q->elevator = eq; | |
5021 | spin_unlock_irq(q->queue_lock); | |
5022 | ||
aee69d78 PV |
5023 | /* |
5024 | * Our fallback bfqq if bfq_get_queue() runs into OOM issues. | |
5025 | * Grab a permanent reference to it, so that the normal code flow | |
5026 | * will not attempt to free it. | |
5027 | */ | |
5028 | bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); | |
5029 | bfqd->oom_bfqq.ref++; | |
5030 | bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; | |
5031 | bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; | |
5032 | bfqd->oom_bfqq.entity.new_weight = | |
5033 | bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); | |
e1b2324d AA |
5034 | |
5035 | /* oom_bfqq does not participate in bursts */ | |
5036 | bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); | |
5037 | ||
aee69d78 PV |
5038 | /* |
5039 | * Trigger weight initialization, according to ioprio, at the | |
5040 | * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio | |
5041 | * class won't be changed any more. | |
5042 | */ | |
5043 | bfqd->oom_bfqq.entity.prio_changed = 1; | |
5044 | ||
5045 | bfqd->queue = q; | |
5046 | ||
e21b7a0b | 5047 | INIT_LIST_HEAD(&bfqd->dispatch); |
aee69d78 PV |
5048 | |
5049 | hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, | |
5050 | HRTIMER_MODE_REL); | |
5051 | bfqd->idle_slice_timer.function = bfq_idle_slice_timer; | |
5052 | ||
1de0c4cd AA |
5053 | bfqd->queue_weights_tree = RB_ROOT; |
5054 | bfqd->group_weights_tree = RB_ROOT; | |
5055 | ||
aee69d78 PV |
5056 | INIT_LIST_HEAD(&bfqd->active_list); |
5057 | INIT_LIST_HEAD(&bfqd->idle_list); | |
e1b2324d | 5058 | INIT_HLIST_HEAD(&bfqd->burst_list); |
aee69d78 PV |
5059 | |
5060 | bfqd->hw_tag = -1; | |
5061 | ||
5062 | bfqd->bfq_max_budget = bfq_default_max_budget; | |
5063 | ||
5064 | bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; | |
5065 | bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; | |
5066 | bfqd->bfq_back_max = bfq_back_max; | |
5067 | bfqd->bfq_back_penalty = bfq_back_penalty; | |
5068 | bfqd->bfq_slice_idle = bfq_slice_idle; | |
aee69d78 PV |
5069 | bfqd->bfq_timeout = bfq_timeout; |
5070 | ||
5071 | bfqd->bfq_requests_within_timer = 120; | |
5072 | ||
e1b2324d AA |
5073 | bfqd->bfq_large_burst_thresh = 8; |
5074 | bfqd->bfq_burst_interval = msecs_to_jiffies(180); | |
5075 | ||
44e44a1b PV |
5076 | bfqd->low_latency = true; |
5077 | ||
5078 | /* | |
5079 | * Trade-off between responsiveness and fairness. | |
5080 | */ | |
5081 | bfqd->bfq_wr_coeff = 30; | |
77b7dcea | 5082 | bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); |
44e44a1b PV |
5083 | bfqd->bfq_wr_max_time = 0; |
5084 | bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); | |
5085 | bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); | |
77b7dcea PV |
5086 | bfqd->bfq_wr_max_softrt_rate = 7000; /* |
5087 | * Approximate rate required | |
5088 | * to playback or record a | |
5089 | * high-definition compressed | |
5090 | * video. | |
5091 | */ | |
cfd69712 | 5092 | bfqd->wr_busy_queues = 0; |
44e44a1b PV |
5093 | |
5094 | /* | |
5095 | * Begin by assuming, optimistically, that the device is a | |
5096 | * high-speed one, and that its peak rate is equal to 2/3 of | |
5097 | * the highest reference rate. | |
5098 | */ | |
5099 | bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] * | |
5100 | T_fast[blk_queue_nonrot(bfqd->queue)]; | |
5101 | bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3; | |
5102 | bfqd->device_speed = BFQ_BFQD_FAST; | |
5103 | ||
aee69d78 | 5104 | spin_lock_init(&bfqd->lock); |
aee69d78 | 5105 | |
e21b7a0b AA |
5106 | /* |
5107 | * The invocation of the next bfq_create_group_hierarchy | |
5108 | * function is the head of a chain of function calls | |
5109 | * (bfq_create_group_hierarchy->blkcg_activate_policy-> | |
5110 | * blk_mq_freeze_queue) that may lead to the invocation of the | |
5111 | * has_work hook function. For this reason, | |
5112 | * bfq_create_group_hierarchy is invoked only after all | |
5113 | * scheduler data has been initialized, apart from the fields | |
5114 | * that can be initialized only after invoking | |
5115 | * bfq_create_group_hierarchy. This, in particular, enables | |
5116 | * has_work to correctly return false. Of course, to avoid | |
5117 | * other inconsistencies, the blk-mq stack must then refrain | |
5118 | * from invoking further scheduler hooks before this init | |
5119 | * function is finished. | |
5120 | */ | |
5121 | bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); | |
5122 | if (!bfqd->root_group) | |
5123 | goto out_free; | |
5124 | bfq_init_root_group(bfqd->root_group, bfqd); | |
5125 | bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); | |
5126 | ||
b5dc5d4d | 5127 | wbt_disable_default(q); |
aee69d78 | 5128 | return 0; |
e21b7a0b AA |
5129 | |
5130 | out_free: | |
5131 | kfree(bfqd); | |
5132 | kobject_put(&eq->kobj); | |
5133 | return -ENOMEM; | |
aee69d78 PV |
5134 | } |
5135 | ||
5136 | static void bfq_slab_kill(void) | |
5137 | { | |
5138 | kmem_cache_destroy(bfq_pool); | |
5139 | } | |
5140 | ||
5141 | static int __init bfq_slab_setup(void) | |
5142 | { | |
5143 | bfq_pool = KMEM_CACHE(bfq_queue, 0); | |
5144 | if (!bfq_pool) | |
5145 | return -ENOMEM; | |
5146 | return 0; | |
5147 | } | |
5148 | ||
5149 | static ssize_t bfq_var_show(unsigned int var, char *page) | |
5150 | { | |
5151 | return sprintf(page, "%u\n", var); | |
5152 | } | |
5153 | ||
2f79136b | 5154 | static int bfq_var_store(unsigned long *var, const char *page) |
aee69d78 PV |
5155 | { |
5156 | unsigned long new_val; | |
5157 | int ret = kstrtoul(page, 10, &new_val); | |
5158 | ||
2f79136b BVA |
5159 | if (ret) |
5160 | return ret; | |
5161 | *var = new_val; | |
5162 | return 0; | |
aee69d78 PV |
5163 | } |
5164 | ||
5165 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | |
5166 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ | |
5167 | { \ | |
5168 | struct bfq_data *bfqd = e->elevator_data; \ | |
5169 | u64 __data = __VAR; \ | |
5170 | if (__CONV == 1) \ | |
5171 | __data = jiffies_to_msecs(__data); \ | |
5172 | else if (__CONV == 2) \ | |
5173 | __data = div_u64(__data, NSEC_PER_MSEC); \ | |
5174 | return bfq_var_show(__data, (page)); \ | |
5175 | } | |
5176 | SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); | |
5177 | SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); | |
5178 | SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); | |
5179 | SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); | |
5180 | SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); | |
5181 | SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); | |
5182 | SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); | |
5183 | SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); | |
44e44a1b | 5184 | SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); |
aee69d78 PV |
5185 | #undef SHOW_FUNCTION |
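/*
 * Editorial note, not in the original source: in SHOW_FUNCTION above,
 * __CONV selects the unit conversion applied before displaying a
 * tunable:
 *   0 - the value is shown as stored;
 *   1 - the value is stored in jiffies and shown in milliseconds;
 *   2 - the value is stored in nanoseconds and shown in milliseconds.
 * STORE_FUNCTION below applies the inverse conversion when a tunable
 * is written.
 */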
5186 | ||
5187 | #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ | |
5188 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ | |
5189 | { \ | |
5190 | struct bfq_data *bfqd = e->elevator_data; \ | |
5191 | u64 __data = __VAR; \ | |
5192 | __data = div_u64(__data, NSEC_PER_USEC); \ | |
5193 | return bfq_var_show(__data, (page)); \ | |
5194 | } | |
5195 | USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); | |
5196 | #undef USEC_SHOW_FUNCTION | |
5197 | ||
5198 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | |
5199 | static ssize_t \ | |
5200 | __FUNC(struct elevator_queue *e, const char *page, size_t count) \ | |
5201 | { \ | |
5202 | struct bfq_data *bfqd = e->elevator_data; \ | |
1530486c | 5203 | unsigned long __data, __min = (MIN), __max = (MAX); \ |
2f79136b BVA |
5204 | int ret; \ |
5205 | \ | |
5206 | ret = bfq_var_store(&__data, (page)); \ | |
5207 | if (ret) \ | |
5208 | return ret; \ | |
1530486c BVA |
5209 | if (__data < __min) \ |
5210 | __data = __min; \ | |
5211 | else if (__data > __max) \ | |
5212 | __data = __max; \ | |
aee69d78 PV |
5213 | if (__CONV == 1) \ |
5214 | *(__PTR) = msecs_to_jiffies(__data); \ | |
5215 | else if (__CONV == 2) \ | |
5216 | *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ | |
5217 | else \ | |
5218 | *(__PTR) = __data; \ | |
235f8da1 | 5219 | return count; \ |
aee69d78 PV |
5220 | } |
5221 | STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, | |
5222 | INT_MAX, 2); | |
5223 | STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, | |
5224 | INT_MAX, 2); | |
5225 | STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); | |
5226 | STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, | |
5227 | INT_MAX, 0); | |
5228 | STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); | |
5229 | #undef STORE_FUNCTION | |
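/*
 * For reference, STORE_FUNCTION(bfq_back_seek_penalty_store,
 * &bfqd->bfq_back_penalty, 1, INT_MAX, 0) expands to roughly:
 *
 *	static ssize_t bfq_back_seek_penalty_store(struct elevator_queue *e,
 *						   const char *page,
 *						   size_t count)
 *	{
 *		struct bfq_data *bfqd = e->elevator_data;
 *		unsigned long __data;
 *		int ret = bfq_var_store(&__data, page);
 *
 *		if (ret)
 *			return ret;
 *		__data = clamp_t(unsigned long, __data, 1, INT_MAX);
 *		bfqd->bfq_back_penalty = __data; (__CONV == 0: store raw)
 *		return count;
 *	}
 */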
5230 | ||
5231 | #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ | |
5232 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ | |
5233 | { \ | |
5234 | struct bfq_data *bfqd = e->elevator_data; \ | |
1530486c | 5235 | unsigned long __data, __min = (MIN), __max = (MAX); \ |
2f79136b BVA |
5236 | int ret; \ |
5237 | \ | |
5238 | ret = bfq_var_store(&__data, (page)); \ | |
5239 | if (ret) \ | |
5240 | return ret; \ | |
1530486c BVA |
5241 | if (__data < __min) \ |
5242 | __data = __min; \ | |
5243 | else if (__data > __max) \ | |
5244 | __data = __max; \ | |
aee69d78 | 5245 | *(__PTR) = (u64)__data * NSEC_PER_USEC; \ |
235f8da1 | 5246 | return count; \ |
aee69d78 PV |
5247 | } |
5248 | USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, | |
5249 | UINT_MAX); | |
5250 | #undef USEC_STORE_FUNCTION | |
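/*
 * Note that slice_idle and slice_idle_us expose the same field,
 * bfqd->bfq_slice_idle (kept in ns), at two granularities; a write
 * through one is reflected by the other, e.g. (hypothetical device
 * name):
 *
 *	# echo 2500 > /sys/block/sda/queue/iosched/slice_idle_us
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	2
 */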
5251 | ||
aee69d78 PV |
5252 | static ssize_t bfq_max_budget_store(struct elevator_queue *e, |
5253 | const char *page, size_t count) | |
5254 | { | |
5255 | struct bfq_data *bfqd = e->elevator_data; | |
2f79136b BVA |
5256 | unsigned long __data; |
5257 | int ret; | |
235f8da1 | 5258 | |
2f79136b BVA |
5259 | ret = bfq_var_store(&__data, (page)); |
5260 | if (ret) | |
5261 | return ret; | |
aee69d78 PV |
5262 | |
5263 | if (__data == 0) | |
ab0e43e9 | 5264 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); |
aee69d78 PV |
5265 | else { |
5266 | if (__data > INT_MAX) | |
5267 | __data = INT_MAX; | |
5268 | bfqd->bfq_max_budget = __data; | |
5269 | } | |
5270 | ||
5271 | bfqd->bfq_user_max_budget = __data; | |
5272 | ||
235f8da1 | 5273 | return count; |
aee69d78 PV |
5274 | } |
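/*
 * Usage sketch (hypothetical device name): a value of 0 re-enables
 * auto-tuning of the maximum budget from the estimated peak rate:
 *
 *	# echo 8192 > /sys/block/sda/queue/iosched/max_budget
 *	# echo 0 > /sys/block/sda/queue/iosched/max_budget
 */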
5275 | ||
5276 | /* | |
5277 | * The name is kept for compatibility with cfq's parameters, | |
5278 | * but this timeout is used for both sync and async requests. | |
5279 | */ | |
5280 | static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, | |
5281 | const char *page, size_t count) | |
5282 | { | |
5283 | struct bfq_data *bfqd = e->elevator_data; | |
2f79136b BVA |
5284 | unsigned long __data; |
5285 | int ret; | |
235f8da1 | 5286 | |
2f79136b BVA |
5287 | ret = bfq_var_store(&__data, (page)); |
5288 | if (ret) | |
5289 | return ret; | |
aee69d78 PV |
5290 | |
5291 | if (__data < 1) | |
5292 | __data = 1; | |
5293 | else if (__data > INT_MAX) | |
5294 | __data = INT_MAX; | |
5295 | ||
5296 | bfqd->bfq_timeout = msecs_to_jiffies(__data); | |
5297 | if (bfqd->bfq_user_max_budget == 0) | |
ab0e43e9 | 5298 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); |
aee69d78 | 5299 | |
235f8da1 | 5300 | return count; |
aee69d78 PV |
5301 | } |
5302 | ||
5303 | static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, | |
5304 | const char *page, size_t count) | |
5305 | { | |
5306 | struct bfq_data *bfqd = e->elevator_data; | |
2f79136b BVA |
5307 | unsigned long __data; |
5308 | int ret; | |
235f8da1 | 5309 | |
2f79136b BVA |
5310 | ret = bfq_var_store(&__data, (page)); |
5311 | if (ret) | |
5312 | return ret; | |
aee69d78 PV |
5313 | |
5314 | if (__data > 1) | |
5315 | __data = 1; | |
5316 | if (!bfqd->strict_guarantees && __data == 1 && | |
5317 | bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) | |
5318 | bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; | |
5319 | ||
5320 | bfqd->strict_guarantees = __data; | |
5321 | ||
235f8da1 | 5322 | return count; |
aee69d78 PV |
5323 | } |
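/*
 * Usage sketch (hypothetical device name): enabling strict
 * guarantees also raises bfq_slice_idle to at least 8 ms, per the
 * check above:
 *
 *	# echo 1 > /sys/block/sda/queue/iosched/strict_guarantees
 */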
5324 | ||
44e44a1b PV |
5325 | static ssize_t bfq_low_latency_store(struct elevator_queue *e, |
5326 | const char *page, size_t count) | |
5327 | { | |
5328 | struct bfq_data *bfqd = e->elevator_data; | |
2f79136b BVA |
5329 | unsigned long __data; |
5330 | int ret; | |
235f8da1 | 5331 | |
2f79136b BVA |
5332 | ret = bfq_var_store(&__data, (page)); |
5333 | if (ret) | |
5334 | return ret; | |
44e44a1b PV |
5335 | |
5336 | if (__data > 1) | |
5337 | __data = 1; | |
5338 | if (__data == 0 && bfqd->low_latency != 0) | |
5339 | bfq_end_wr(bfqd); | |
5340 | bfqd->low_latency = __data; | |
5341 | ||
235f8da1 | 5342 | return count; |
44e44a1b PV |
5343 | } |
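/*
 * Usage sketch (hypothetical device name): clearing low_latency
 * also ends, via bfq_end_wr(), any weight-raising in progress:
 *
 *	# echo 0 > /sys/block/sda/queue/iosched/low_latency
 */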
5344 | ||
aee69d78 PV |
5345 | #define BFQ_ATTR(name) \ |
5346 | __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store) | |
5347 | ||
5348 | static struct elv_fs_entry bfq_attrs[] = { | |
5349 | BFQ_ATTR(fifo_expire_sync), | |
5350 | BFQ_ATTR(fifo_expire_async), | |
5351 | BFQ_ATTR(back_seek_max), | |
5352 | BFQ_ATTR(back_seek_penalty), | |
5353 | BFQ_ATTR(slice_idle), | |
5354 | BFQ_ATTR(slice_idle_us), | |
5355 | BFQ_ATTR(max_budget), | |
5356 | BFQ_ATTR(timeout_sync), | |
5357 | BFQ_ATTR(strict_guarantees), | |
44e44a1b | 5358 | BFQ_ATTR(low_latency), |
aee69d78 PV |
5359 | __ATTR_NULL |
5360 | }; | |
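/*
 * With BFQ selected, the attributes above appear under
 * /sys/block/<dev>/queue/iosched/, e.g. (hypothetical listing):
 *
 *	# ls /sys/block/sda/queue/iosched
 *	back_seek_max      fifo_expire_async  max_budget
 *	back_seek_penalty  fifo_expire_sync   slice_idle
 *	low_latency        slice_idle_us      strict_guarantees
 *	timeout_sync
 */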
5361 | ||
5362 | static struct elevator_type iosched_bfq_mq = { | |
5363 | .ops.mq = { | |
a52a69ea | 5364 | .limit_depth = bfq_limit_depth, |
5bbf4e5a | 5365 | .prepare_request = bfq_prepare_request, |
7b9e9361 | 5366 | .finish_request = bfq_finish_request, |
aee69d78 PV |
5367 | .exit_icq = bfq_exit_icq, |
5368 | .insert_requests = bfq_insert_requests, | |
5369 | .dispatch_request = bfq_dispatch_request, | |
5370 | .next_request = elv_rb_latter_request, | |
5371 | .former_request = elv_rb_former_request, | |
5372 | .allow_merge = bfq_allow_bio_merge, | |
5373 | .bio_merge = bfq_bio_merge, | |
5374 | .request_merge = bfq_request_merge, | |
5375 | .requests_merged = bfq_requests_merged, | |
5376 | .request_merged = bfq_request_merged, | |
5377 | .has_work = bfq_has_work, | |
5378 | .init_sched = bfq_init_queue, | |
5379 | .exit_sched = bfq_exit_queue, | |
5380 | }, | |
5381 | ||
5382 | .uses_mq = true, | |
5383 | .icq_size = sizeof(struct bfq_io_cq), | |
5384 | .icq_align = __alignof__(struct bfq_io_cq), | |
5385 | .elevator_attrs = bfq_attrs, | |
5386 | .elevator_name = "bfq", | |
5387 | .elevator_owner = THIS_MODULE, | |
5388 | }; | |
26b4cf24 | 5389 | MODULE_ALIAS("bfq-iosched"); |
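/*
 * The scheduler registers under the name "bfq" (with "bfq-iosched"
 * as a module alias) and is selected per device through the usual
 * blk-mq interface, e.g. (hypothetical device and scheduler list):
 *
 *	# echo bfq > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	mq-deadline kyber [bfq] none
 */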
aee69d78 PV |
5390 | |
5391 | static int __init bfq_init(void) | |
5392 | { | |
5393 | int ret; | |
5394 | ||
e21b7a0b AA |
5395 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
5396 | ret = blkcg_policy_register(&blkcg_policy_bfq); | |
5397 | if (ret) | |
5398 | return ret; | |
5399 | #endif | |
5400 | ||
aee69d78 PV |
5401 | ret = -ENOMEM; |
5402 | if (bfq_slab_setup()) | |
5403 | goto err_pol_unreg; | |
5404 | ||
44e44a1b PV |
5405 | /* |
5406 | * Times to load large popular applications for the typical | |
5407 | * systems installed on the reference devices (see the | |
5408 | * comments before the definitions of the next two | |
5409 | * arrays). Actually, we use slightly slower values, as the | |
5410 | * estimated peak rate tends to be smaller than the actual | |
5411 | * peak rate. This is because estimates are computed over | |
5412 | * much shorter time intervals than the long intervals | |
5413 | * typically used for benchmarking: shorter intervals both | |
5414 | * adapt more quickly to variations and avoid relying on a | |
5415 | * peak-rate-evaluation workload being run for a long time. | |
5417 | */ | |
5418 | T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */ | |
5419 | T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */ | |
5420 | T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */ | |
5421 | T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */ | |
5422 | ||
5423 | /* | |
5424 | * Thresholds that determine the switch between speed classes | |
5425 | * (see the comments before the definition of the array | |
5426 | * device_speed_thresh). These thresholds are biased towards | |
5427 | * transitions to the fast class. This is safer than the | |
5428 | * opposite bias. In fact, a wrong transition to the slow | |
5429 | * class results in short weight-raising periods, because the | |
5430 | * speed of the device then tends to be higher than the | |
5431 | * reference peak rate. Conversely, a wrong transition to | |
5432 | * the fast class tends to lengthen weight-raising periods, | |
5433 | * for the symmetric reason. | |
5434 | */ | |
5435 | device_speed_thresh[0] = (4 * R_slow[0]) / 3; | |
5436 | device_speed_thresh[1] = (4 * R_slow[1]) / 3; | |
5437 | ||
aee69d78 PV |
5438 | ret = elv_register(&iosched_bfq_mq); |
5439 | if (ret) | |
37dcd657 | 5440 | goto slab_kill; |
aee69d78 PV |
5441 | |
5442 | return 0; | |
5443 | ||
37dcd657 | 5444 | slab_kill: |
5445 | bfq_slab_kill(); | |
aee69d78 | 5446 | err_pol_unreg: |
e21b7a0b AA |
5447 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
5448 | blkcg_policy_unregister(&blkcg_policy_bfq); | |
5449 | #endif | |
aee69d78 PV |
5450 | return ret; |
5451 | } | |
5452 | ||
5453 | static void __exit bfq_exit(void) | |
5454 | { | |
5455 | elv_unregister(&iosched_bfq_mq); | |
e21b7a0b AA |
5456 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
5457 | blkcg_policy_unregister(&blkcg_policy_bfq); | |
5458 | #endif | |
aee69d78 PV |
5459 | bfq_slab_kill(); |
5460 | } | |
5461 | ||
5462 | module_init(bfq_init); | |
5463 | module_exit(bfq_exit); | |
5464 | ||
5465 | MODULE_AUTHOR("Paolo Valente"); | |
5466 | MODULE_LICENSE("GPL"); | |
5467 | MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler"); |