/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		struct list_head rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;
	unsigned short index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];

	struct request_queue *queue;
	struct blk_mq_ctxs *ctxs;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

#define BLK_MQ_CPU_WORK_BATCH	(8)

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
		unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that REQ_POLLED is only set when polling is
	 * enabled for the queue.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags, type) to hardware queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

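/*
 * Illustrative example (not part of this header's API): the opf bits select
 * the hctx type, with polled I/O taking precedence over the read map:
 *
 *	blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED) == HCTX_TYPE_POLL
 *	blk_mq_get_hctx_type(REQ_OP_READ)              == HCTX_TYPE_READ
 *	blk_mq_get_hctx_type(REQ_OP_WRITE)             == HCTX_TYPE_DEFAULT
 *
 * If a queue has no dedicated read or poll map, ctx->hctxs[] is set up so
 * that those types fall back to the default hardware queue.
 */
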
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

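/*
 * Illustrative sketch (hypothetical caller, not a declaration in this
 * header): the submission path first resolves the per-CPU software queue and
 * then maps it to a hardware queue for the given operation flags:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(bio->bi_opf, ctx);
 */
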
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct rq_list *cached_rqs;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

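/*
 * Illustrative sketch (assumed caller, not defined here): a single
 * synchronous allocation typically fills in only the input fields and lets
 * the allocator pick ctx/hctx:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.flags		= flags,
 *		.cmd_flags	= opf,
 *		.nr_tags	= 1,
 *	};
 */
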
struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, unsigned int flags, int node);
void blk_mq_free_tags(struct blk_mq_tags *tags);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->rq_flags & RQF_SCHED_TAGS)
		return data->hctx->sched_tags;
	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	/* Fast path: hardware queue is not stopped most of the time. */
	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return false;

	/*
	 * This barrier orders adding a request to the dispatch list before
	 * the test of BLK_MQ_S_STOPPED below. It pairs with the memory
	 * barrier in blk_mq_start_stopped_hw_queue() so that the dispatch
	 * code either sees BLK_MQ_S_STOPPED cleared or sees a non-empty
	 * dispatch list, and does not miss dispatching requests.
	 */
	smp_mb();

	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

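/*
 * Illustrative sketch of the dispatch-budget protocol (assumed caller, not
 * part of this header): a dispatcher acquires a budget token before handing
 * a request to the driver, stores it on the request, and returns it if the
 * request is not dispatched after all:
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return;		// driver has no budget, back off
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	// ... if dispatch fails or is aborted:
 *	blk_mq_put_dispatch_budget(q, budget_token);
 */
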
static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_add(val, &hctx->nr_active);
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_add_active_requests(hctx, 1);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_add_active_requests(hctx, val);
}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_inc_active_requests(hctx);
}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, val);
}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_dec_active_requests(hctx);
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_alloc_driver_tag(struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	return true;
}

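/*
 * Note (summary, not a definition in this header): when an I/O scheduler is
 * attached, rq->internal_tag is the scheduler tag a request holds for its
 * whole lifetime, while rq->tag is the driver tag that is acquired via
 * blk_mq_get_driver_tag() before the request is passed to ->queue_rq() and
 * released again with blk_mq_put_driver_tag() when the request is requeued
 * or completed.
 */
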
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant: a depth of 1 cannot be split up.
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

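/*
 * Worked example (illustrative numbers): with a shared bitmap depth of 256
 * and 4 active users, each user may have up to
 * max((256 + 4 - 1) / 4, 4) = 64 requests in flight; with 128 users the
 * floor of 4 tags per user applies instead.
 */
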
/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {			\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set;	\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock(__tag_set->srcu);		\
		(dispatch_ops);						\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);		\
	} else {							\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)

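/*
 * Illustrative usage (assumed caller, not defined here): wrap the actual
 * dispatch call so it runs under RCU, or SRCU for BLK_MQ_F_BLOCKING hosts:
 *
 *	blk_mq_run_dispatch_ops(hctx->queue,
 *				blk_mq_sched_dispatch_requests(hctx));
 */
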
static inline bool blk_mq_can_poll(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_POLL) &&
		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
}

#endif