// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
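/*
 * Illustrative sketch (not part of this file): a filesystem that issues its
 * direct I/O through iomap_dio_rw() can typically support polled I/O by
 * pointing the ->iopoll hook of its file_operations straight at the helper
 * above, e.g.:
 *
 *	const struct file_operations example_file_operations = {
 *		.read_iter	= example_file_read_iter,
 *		.write_iter	= example_file_write_iter,
 *		.iopoll		= iomap_dio_iopoll,
 *	};
 *
 * The example_* names are hypothetical.  iomap_dio_rw() below stashes the
 * last submission queue and cookie in iocb->private and iocb->ki_cookie,
 * which is exactly what iomap_dio_iopoll() polls on.
 */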

static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	dio->submit.cookie = submit_bio(bio);
}

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}
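/*
 * Illustrative sketch (hypothetical names, not part of this file): ->end_io
 * is the hook a filesystem supplies via struct iomap_dio_ops to run
 * transactional work once the data has reached the device, and it runs
 * before the page cache re-invalidation above.  A minimal implementation
 * might look like:
 *
 *	static int example_dio_end_io(struct kiocb *iocb, ssize_t size,
 *			int error, unsigned flags)
 *	{
 *		if (error)
 *			return error;
 *		if (flags & IOMAP_DIO_UNWRITTEN)
 *			return example_convert_unwritten(iocb, size);
 *		return 0;
 *	}
 *
 * where example_convert_unwritten() stands in for whatever the filesystem
 * uses to convert the just-written unwritten extents to real allocations.
 */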

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
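/*
 * Note on the completion paths above: the final reference drop in
 * iomap_dio_bio_end_io() takes one of three routes.  A synchronous dio
 * (wait_for_completion) only wakes the submitting task, which then calls
 * iomap_dio_complete() itself.  An asynchronous write is punted to the
 * superblock's s_dio_done_wq workqueue because completion may need to
 * block (e.g. in ->end_io or generic_write_sync()), which is not allowed
 * in bio completion context.  An asynchronous read has no such needs and
 * is completed inline.
 */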

static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio);
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied ? copied : ret;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		bool wait_for_completion)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
		return -EIO;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
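
/*
 * Illustrative sketch (hypothetical names, not part of this file): a typical
 * filesystem read path calls iomap_dio_rw() with its own iomap_ops while
 * holding the inode's i_rwsem, and passes is_sync_kiocb() as the
 * wait_for_completion argument so synchronous iocbs are completed inline:
 *
 *	static ssize_t example_file_dio_read(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL,
 *				is_sync_kiocb(iocb));
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * example_iomap_ops stands in for the filesystem's iomap_ops whose
 * ->iomap_begin maps the requested range; a NULL iomap_dio_ops is fine for
 * reads since no ->end_io processing is needed.
 */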