ae259a9c CH |
1 | /* |
2 | * Copyright (C) 2010 Red Hat, Inc. | |
72b4daa2 | 3 | * Copyright (c) 2016-2018 Christoph Hellwig. |
ae259a9c CH |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | #include <linux/module.h> | |
15 | #include <linux/compiler.h> | |
16 | #include <linux/fs.h> | |
17 | #include <linux/iomap.h> | |
18 | #include <linux/uaccess.h> | |
19 | #include <linux/gfp.h> | |
9dc55f13 | 20 | #include <linux/migrate.h> |
ae259a9c | 21 | #include <linux/mm.h> |
72b4daa2 | 22 | #include <linux/mm_inline.h> |
ae259a9c CH |
23 | #include <linux/swap.h> |
24 | #include <linux/pagemap.h> | |
8a78cb1f | 25 | #include <linux/pagevec.h> |
ae259a9c CH |
26 | #include <linux/file.h> |
27 | #include <linux/uio.h> | |
28 | #include <linux/backing-dev.h> | |
29 | #include <linux/buffer_head.h> | |
ff6a9292 | 30 | #include <linux/task_io_accounting_ops.h> |
9a286f0e | 31 | #include <linux/dax.h> |
f361bf4a IM |
32 | #include <linux/sched/signal.h> |
33 | ||
ae259a9c CH |
34 | #include "internal.h" |
35 | ||
ae259a9c CH |
36 | /* |
37 | * Execute an iomap write on a segment of the mapping that spans a | |
38 | * contiguous range of pages that have identical block mapping state. | |
39 | * | |
40 | * This avoids the need to map pages individually, do individual allocations | |
41 | * for each page and, most importantly, avoids the need for filesystem-specific | |
42 | * locking per page. Instead, all the operations are amortised over the entire | |
43 | * range of pages. It is assumed that the filesystems will lock whatever | |
44 | * resources they require in the iomap_begin call, and release them in the | |
45 | * iomap_end call. | |
46 | */ | |
befb503c | 47 | loff_t |
ae259a9c | 48 | iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, |
8ff6daa1 | 49 | const struct iomap_ops *ops, void *data, iomap_actor_t actor) |
ae259a9c CH |
50 | { |
51 | struct iomap iomap = { 0 }; | |
52 | loff_t written = 0, ret; | |
53 | ||
54 | /* | |
55 | * Need to map a range from start position for length bytes. This can | |
56 | * span multiple pages - it is only guaranteed to return a range of a | |
57 | * single type of pages (e.g. all into a hole, all mapped or all | |
58 | * unwritten). Failure at this point has nothing to undo. | |
59 | * | |
60 | * If allocation is required for this range, reserve the space now so | |
61 | * that the allocation is guaranteed to succeed later on. Once we copy | |
62 | * the data into the page cache pages, then we cannot fail otherwise we | |
63 | * expose transient stale data. If the reserve fails, we can safely | |
64 | * back out at this point as there is nothing to undo. | |
65 | */ | |
66 | ret = ops->iomap_begin(inode, pos, length, flags, &iomap); | |
67 | if (ret) | |
68 | return ret; | |
69 | if (WARN_ON(iomap.offset > pos)) | |
70 | return -EIO; | |
0c6dda7a DW |
71 | if (WARN_ON(iomap.length == 0)) |
72 | return -EIO; | |
ae259a9c CH |
73 | |
74 | /* | |
75 | * Cut down the length to the one actually provided by the filesystem, | |
76 | * as it might not be able to give us the whole size that we requested. | |
77 | */ | |
78 | if (iomap.offset + iomap.length < pos + length) | |
79 | length = iomap.offset + iomap.length - pos; | |
80 | ||
81 | /* | |
82 | * Now that we have guaranteed that the space allocation will succeed, | |
83 | * we can do the copy-in page by page without having to worry about | |
84 | * failures exposing transient data. | |
85 | */ | |
86 | written = actor(inode, pos, length, data, &iomap); | |
87 | ||
88 | /* | |
89 | * Now the data has been copied, commit the range we've copied. This | |
90 | * should not fail unless the filesystem has had a fatal error. | |
91 | */ | |
f20ac7ab CH |
92 | if (ops->iomap_end) { |
93 | ret = ops->iomap_end(inode, pos, length, | |
94 | written > 0 ? written : 0, | |
95 | flags, &iomap); | |
96 | } | |
ae259a9c CH |
97 | |
98 | return written ? written : ret; | |
99 | } | |
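/*
 * Illustrative sketch (not part of this file): how a caller drives
 * iomap_apply().  The actor operates on the single mapping returned by
 * ->iomap_begin and reports how many bytes it consumed; "example_actor"
 * and "example_iomap_ops" are placeholder names.
 *
 *	static loff_t example_actor(struct inode *inode, loff_t pos,
 *			loff_t length, void *data, struct iomap *iomap)
 *	{
 *		// work on [pos, pos + length) within this one mapping
 *		return length;
 *	}
 *
 *	loff_t ret = iomap_apply(inode, pos, len, IOMAP_WRITE,
 *			&example_iomap_ops, NULL, example_actor);
 *
 * A return value <= 0 from the actor terminates the walk; the exported
 * helpers below loop, advancing pos by the returned byte count until the
 * requested range has been covered.
 */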
100 | ||
57fc505d CH |
101 | static sector_t |
102 | iomap_sector(struct iomap *iomap, loff_t pos) | |
103 | { | |
104 | return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; | |
105 | } | |
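/*
 * Worked example (illustrative): with 512-byte sectors (SECTOR_SHIFT == 9),
 * an extent whose disk address is iomap->addr = 1048576 bytes and which
 * starts at file offset iomap->offset = 65536, a position pos = 69632 maps
 * to sector (1048576 + 69632 - 65536) >> 9 = 2056.
 */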
106 | ||
9dc55f13 CH |
107 | static struct iomap_page * |
108 | iomap_page_create(struct inode *inode, struct page *page) | |
109 | { | |
110 | struct iomap_page *iop = to_iomap_page(page); | |
111 | ||
112 | if (iop || i_blocksize(inode) == PAGE_SIZE) | |
113 | return iop; | |
114 | ||
115 | iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); | |
116 | atomic_set(&iop->read_count, 0); | |
117 | atomic_set(&iop->write_count, 0); | |
118 | bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); | |
119 | set_page_private(page, (unsigned long)iop); | |
120 | SetPagePrivate(page); | |
121 | return iop; | |
122 | } | |
123 | ||
124 | static void | |
125 | iomap_page_release(struct page *page) | |
126 | { | |
127 | struct iomap_page *iop = to_iomap_page(page); | |
128 | ||
129 | if (!iop) | |
130 | return; | |
131 | WARN_ON_ONCE(atomic_read(&iop->read_count)); | |
132 | WARN_ON_ONCE(atomic_read(&iop->write_count)); | |
133 | ClearPagePrivate(page); | |
134 | set_page_private(page, 0); | |
135 | kfree(iop); | |
136 | } | |
137 | ||
138 | /* | |
139 | * Calculate the range inside the page that we actually need to read. | |
140 | */ | |
141 | static void | |
142 | iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, | |
143 | loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) | |
144 | { | |
8c110d43 DC |
145 | loff_t orig_pos = *pos; |
146 | loff_t isize = i_size_read(inode); | |
9dc55f13 CH |
147 | unsigned block_bits = inode->i_blkbits; |
148 | unsigned block_size = (1 << block_bits); | |
10259de1 | 149 | unsigned poff = offset_in_page(*pos); |
9dc55f13 CH |
150 | unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); |
151 | unsigned first = poff >> block_bits; | |
152 | unsigned last = (poff + plen - 1) >> block_bits; | |
9dc55f13 CH |
153 | |
154 | /* | |
155 | * If the block size is smaller than the page size we need to check the | |
156 | * per-block uptodate status and adjust the offset and length if needed | |
157 | * to avoid reading in already uptodate ranges. | |
158 | */ | |
159 | if (iop) { | |
160 | unsigned int i; | |
161 | ||
162 | /* move forward for each leading block marked uptodate */ | |
163 | for (i = first; i <= last; i++) { | |
164 | if (!test_bit(i, iop->uptodate)) | |
165 | break; | |
166 | *pos += block_size; | |
167 | poff += block_size; | |
168 | plen -= block_size; | |
169 | first++; | |
170 | } | |
171 | ||
172 | /* truncate len if we find any trailing uptodate block(s) */ | |
173 | for ( ; i <= last; i++) { | |
174 | if (test_bit(i, iop->uptodate)) { | |
175 | plen -= (last - i + 1) * block_size; | |
176 | last = i - 1; | |
177 | break; | |
178 | } | |
179 | } | |
180 | } | |
181 | ||
182 | /* | |
183 | * If the extent spans the block that contains the i_size we need to | |
184 | * handle both halves separately so that we properly zero data in the | |
185 | * page cache for blocks that are entirely outside of i_size. | |
186 | */ | |
8c110d43 DC |
187 | if (orig_pos <= isize && orig_pos + length > isize) { |
188 | unsigned end = offset_in_page(isize - 1) >> block_bits; | |
189 | ||
190 | if (first <= end && last > end) | |
191 | plen -= (last - end) * block_size; | |
192 | } | |
9dc55f13 CH |
193 | |
194 | *offp = poff; | |
195 | *lenp = plen; | |
196 | } | |
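/*
 * Worked example (illustrative): on a 4096-byte page with 1024-byte blocks,
 * if blocks 0 and 1 are already marked uptodate in iop->uptodate and the
 * caller asks to read the whole page, the leading loop advances *pos by
 * 2048 and the function returns poff = 2048, plen = 2048, so only the
 * second half of the page is actually read from disk.
 */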
197 | ||
198 | static void | |
199 | iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len) | |
200 | { | |
201 | struct iomap_page *iop = to_iomap_page(page); | |
202 | struct inode *inode = page->mapping->host; | |
203 | unsigned first = off >> inode->i_blkbits; | |
204 | unsigned last = (off + len - 1) >> inode->i_blkbits; | |
205 | unsigned int i; | |
206 | bool uptodate = true; | |
207 | ||
208 | if (iop) { | |
209 | for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) { | |
210 | if (i >= first && i <= last) | |
211 | set_bit(i, iop->uptodate); | |
212 | else if (!test_bit(i, iop->uptodate)) | |
213 | uptodate = false; | |
214 | } | |
215 | } | |
216 | ||
217 | if (uptodate && !PageError(page)) | |
218 | SetPageUptodate(page); | |
219 | } | |
220 | ||
221 | static void | |
222 | iomap_read_finish(struct iomap_page *iop, struct page *page) | |
223 | { | |
224 | if (!iop || atomic_dec_and_test(&iop->read_count)) | |
225 | unlock_page(page); | |
226 | } | |
227 | ||
228 | static void | |
229 | iomap_read_page_end_io(struct bio_vec *bvec, int error) | |
230 | { | |
231 | struct page *page = bvec->bv_page; | |
232 | struct iomap_page *iop = to_iomap_page(page); | |
233 | ||
234 | if (unlikely(error)) { | |
235 | ClearPageUptodate(page); | |
236 | SetPageError(page); | |
237 | } else { | |
238 | iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len); | |
239 | } | |
240 | ||
241 | iomap_read_finish(iop, page); | |
242 | } | |
243 | ||
19e0c58f AG |
244 | static void |
245 | iomap_read_inline_data(struct inode *inode, struct page *page, | |
246 | struct iomap *iomap) | |
247 | { | |
248 | size_t size = i_size_read(inode); | |
249 | void *addr; | |
250 | ||
251 | if (PageUptodate(page)) | |
252 | return; | |
253 | ||
254 | BUG_ON(page->index); | |
255 | BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
256 | ||
257 | addr = kmap_atomic(page); | |
258 | memcpy(addr, iomap->inline_data, size); | |
259 | memset(addr + size, 0, PAGE_SIZE - size); | |
260 | kunmap_atomic(addr); | |
261 | SetPageUptodate(page); | |
262 | } | |
263 | ||
ae259a9c | 264 | static void |
72b4daa2 CH |
265 | iomap_read_end_io(struct bio *bio) |
266 | { | |
267 | int error = blk_status_to_errno(bio->bi_status); | |
268 | struct bio_vec *bvec; | |
269 | int i; | |
270 | ||
271 | bio_for_each_segment_all(bvec, bio, i) | |
9dc55f13 | 272 | iomap_read_page_end_io(bvec, error); |
72b4daa2 CH |
273 | bio_put(bio); |
274 | } | |
275 | ||
276 | struct iomap_readpage_ctx { | |
277 | struct page *cur_page; | |
278 | bool cur_page_in_bio; | |
279 | bool is_readahead; | |
280 | struct bio *bio; | |
281 | struct list_head *pages; | |
282 | }; | |
283 | ||
284 | static loff_t | |
285 | iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
286 | struct iomap *iomap) | |
287 | { | |
288 | struct iomap_readpage_ctx *ctx = data; | |
289 | struct page *page = ctx->cur_page; | |
9dc55f13 | 290 | struct iomap_page *iop = iomap_page_create(inode, page); |
72b4daa2 | 291 | bool is_contig = false; |
9dc55f13 CH |
292 | loff_t orig_pos = pos; |
293 | unsigned poff, plen; | |
72b4daa2 CH |
294 | sector_t sector; |
295 | ||
806a1477 | 296 | if (iomap->type == IOMAP_INLINE) { |
7d5e049e | 297 | WARN_ON_ONCE(pos); |
806a1477 AG |
298 | iomap_read_inline_data(inode, page, iomap); |
299 | return PAGE_SIZE; | |
300 | } | |
301 | ||
9dc55f13 CH |
302 | /* zero post-eof blocks as the page may be mapped */ |
303 | iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); | |
304 | if (plen == 0) | |
305 | goto done; | |
72b4daa2 CH |
306 | |
307 | if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) { | |
308 | zero_user(page, poff, plen); | |
9dc55f13 | 309 | iomap_set_range_uptodate(page, poff, plen); |
72b4daa2 CH |
310 | goto done; |
311 | } | |
312 | ||
313 | ctx->cur_page_in_bio = true; | |
314 | ||
315 | /* | |
316 | * Try to merge into a previous segment if we can. | |
317 | */ | |
318 | sector = iomap_sector(iomap, pos); | |
319 | if (ctx->bio && bio_end_sector(ctx->bio) == sector) { | |
320 | if (__bio_try_merge_page(ctx->bio, page, plen, poff)) | |
321 | goto done; | |
322 | is_contig = true; | |
323 | } | |
324 | ||
9dc55f13 CH |
325 | /* |
326 | * If we start a new segment we need to increase the read count, and we | |
327 | * need to do so before submitting any previous full bio to make sure | |
328 | * that we don't prematurely unlock the page. | |
329 | */ | |
330 | if (iop) | |
331 | atomic_inc(&iop->read_count); | |
332 | ||
72b4daa2 CH |
333 | if (!ctx->bio || !is_contig || bio_full(ctx->bio)) { |
334 | gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); | |
335 | int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
336 | ||
337 | if (ctx->bio) | |
338 | submit_bio(ctx->bio); | |
339 | ||
340 | if (ctx->is_readahead) /* same as readahead_gfp_mask */ | |
341 | gfp |= __GFP_NORETRY | __GFP_NOWARN; | |
342 | ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); | |
343 | ctx->bio->bi_opf = REQ_OP_READ; | |
344 | if (ctx->is_readahead) | |
345 | ctx->bio->bi_opf |= REQ_RAHEAD; | |
346 | ctx->bio->bi_iter.bi_sector = sector; | |
347 | bio_set_dev(ctx->bio, iomap->bdev); | |
348 | ctx->bio->bi_end_io = iomap_read_end_io; | |
349 | } | |
350 | ||
351 | __bio_add_page(ctx->bio, page, plen, poff); | |
352 | done: | |
9dc55f13 CH |
353 | /* |
354 | * Move the caller beyond our range so that it keeps making progress. | |
355 | * For that we have to include any leading non-uptodate ranges, but | |
356 | * we can skip trailing ones as they will be handled in the next | |
357 | * iteration. | |
358 | */ | |
359 | return pos - orig_pos + plen; | |
72b4daa2 CH |
360 | } |
361 | ||
362 | int | |
363 | iomap_readpage(struct page *page, const struct iomap_ops *ops) | |
364 | { | |
365 | struct iomap_readpage_ctx ctx = { .cur_page = page }; | |
366 | struct inode *inode = page->mapping->host; | |
367 | unsigned poff; | |
368 | loff_t ret; | |
369 | ||
72b4daa2 CH |
370 | for (poff = 0; poff < PAGE_SIZE; poff += ret) { |
371 | ret = iomap_apply(inode, page_offset(page) + poff, | |
372 | PAGE_SIZE - poff, 0, ops, &ctx, | |
373 | iomap_readpage_actor); | |
374 | if (ret <= 0) { | |
375 | WARN_ON_ONCE(ret == 0); | |
376 | SetPageError(page); | |
377 | break; | |
378 | } | |
379 | } | |
380 | ||
381 | if (ctx.bio) { | |
382 | submit_bio(ctx.bio); | |
383 | WARN_ON_ONCE(!ctx.cur_page_in_bio); | |
384 | } else { | |
385 | WARN_ON_ONCE(ctx.cur_page_in_bio); | |
386 | unlock_page(page); | |
387 | } | |
388 | ||
389 | /* | |
390 | * Just like mpage_readpages and block_read_full_page we always | |
391 | * return 0 and just mark the page as PageError on errors. This | |
392 | * should be cleaned up all through the stack eventually. | |
393 | */ | |
394 | return 0; | |
395 | } | |
396 | EXPORT_SYMBOL_GPL(iomap_readpage); | |
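/*
 * Usage sketch (illustrative only): a filesystem wires this in as its
 * ->readpage address_space operation; "example_iomap_ops" is a placeholder
 * for the filesystem's own iomap_ops.
 *
 *	static int example_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &example_iomap_ops);
 *	}
 */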
397 | ||
398 | static struct page * | |
399 | iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos, | |
400 | loff_t length, loff_t *done) | |
401 | { | |
402 | while (!list_empty(pages)) { | |
403 | struct page *page = lru_to_page(pages); | |
404 | ||
405 | if (page_offset(page) >= (u64)pos + length) | |
406 | break; | |
407 | ||
408 | list_del(&page->lru); | |
409 | if (!add_to_page_cache_lru(page, inode->i_mapping, page->index, | |
410 | GFP_NOFS)) | |
411 | return page; | |
412 | ||
413 | /* | |
414 | * If we already have a page in the page cache at index we are | |
415 | * done. Upper layers don't care if it is uptodate after the | |
416 | * readpages call itself as every page gets checked again once | |
417 | * actually needed. | |
418 | */ | |
419 | *done += PAGE_SIZE; | |
420 | put_page(page); | |
421 | } | |
422 | ||
423 | return NULL; | |
424 | } | |
425 | ||
426 | static loff_t | |
427 | iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, | |
428 | void *data, struct iomap *iomap) | |
429 | { | |
430 | struct iomap_readpage_ctx *ctx = data; | |
431 | loff_t done, ret; | |
432 | ||
433 | for (done = 0; done < length; done += ret) { | |
10259de1 | 434 | if (ctx->cur_page && offset_in_page(pos + done) == 0) { |
72b4daa2 CH |
435 | if (!ctx->cur_page_in_bio) |
436 | unlock_page(ctx->cur_page); | |
437 | put_page(ctx->cur_page); | |
438 | ctx->cur_page = NULL; | |
439 | } | |
440 | if (!ctx->cur_page) { | |
441 | ctx->cur_page = iomap_next_page(inode, ctx->pages, | |
442 | pos, length, &done); | |
443 | if (!ctx->cur_page) | |
444 | break; | |
445 | ctx->cur_page_in_bio = false; | |
446 | } | |
447 | ret = iomap_readpage_actor(inode, pos + done, length - done, | |
448 | ctx, iomap); | |
449 | } | |
450 | ||
451 | return done; | |
452 | } | |
453 | ||
454 | int | |
455 | iomap_readpages(struct address_space *mapping, struct list_head *pages, | |
456 | unsigned nr_pages, const struct iomap_ops *ops) | |
457 | { | |
458 | struct iomap_readpage_ctx ctx = { | |
459 | .pages = pages, | |
460 | .is_readahead = true, | |
461 | }; | |
462 | loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); | |
463 | loff_t last = page_offset(list_entry(pages->next, struct page, lru)); | |
464 | loff_t length = last - pos + PAGE_SIZE, ret = 0; | |
465 | ||
466 | while (length > 0) { | |
467 | ret = iomap_apply(mapping->host, pos, length, 0, ops, | |
468 | &ctx, iomap_readpages_actor); | |
469 | if (ret <= 0) { | |
470 | WARN_ON_ONCE(ret == 0); | |
471 | goto done; | |
472 | } | |
473 | pos += ret; | |
474 | length -= ret; | |
475 | } | |
476 | ret = 0; | |
477 | done: | |
478 | if (ctx.bio) | |
479 | submit_bio(ctx.bio); | |
480 | if (ctx.cur_page) { | |
481 | if (!ctx.cur_page_in_bio) | |
482 | unlock_page(ctx.cur_page); | |
483 | put_page(ctx.cur_page); | |
484 | } | |
485 | ||
486 | /* | |
487 | * Check that we didn't lose a page due to the arcane calling | |
488 | * conventions. | |
489 | */ | |
490 | WARN_ON_ONCE(!ret && !list_empty(ctx.pages)); | |
491 | return ret; | |
492 | } | |
493 | EXPORT_SYMBOL_GPL(iomap_readpages); | |
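/*
 * Usage sketch (illustrative only): the matching ->readpages hook; again
 * "example_iomap_ops" stands in for the filesystem's own iomap_ops.
 *
 *	static int example_readpages(struct file *unused,
 *			struct address_space *mapping, struct list_head *pages,
 *			unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&example_iomap_ops);
 *	}
 */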
494 | ||
3cc31fa6 ES |
495 | /* |
496 | * iomap_is_partially_uptodate checks whether blocks within a page are | |
497 | * uptodate or not. | |
498 | * | |
499 | * Returns true if all blocks which correspond to a file portion | |
500 | * we want to read within the page are uptodate. | |
501 | */ | |
9dc55f13 CH |
502 | int |
503 | iomap_is_partially_uptodate(struct page *page, unsigned long from, | |
504 | unsigned long count) | |
505 | { | |
506 | struct iomap_page *iop = to_iomap_page(page); | |
507 | struct inode *inode = page->mapping->host; | |
3cc31fa6 | 508 | unsigned len, first, last; |
9dc55f13 CH |
509 | unsigned i; |
510 | ||
3cc31fa6 ES |
511 | /* Limit range to one page */ |
512 | len = min_t(unsigned, PAGE_SIZE - from, count); | |
513 | ||
514 | /* First and last blocks in range within page */ | |
515 | first = from >> inode->i_blkbits; | |
516 | last = (from + len - 1) >> inode->i_blkbits; | |
517 | ||
9dc55f13 CH |
518 | if (iop) { |
519 | for (i = first; i <= last; i++) | |
520 | if (!test_bit(i, iop->uptodate)) | |
521 | return 0; | |
522 | return 1; | |
523 | } | |
524 | ||
525 | return 0; | |
526 | } | |
527 | EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); | |
528 | ||
529 | int | |
530 | iomap_releasepage(struct page *page, gfp_t gfp_mask) | |
531 | { | |
532 | /* | |
533 | * mm accommodates an old ext3 case where clean pages might not have had | |
534 | * the dirty bit cleared. Thus, it can send actual dirty pages to | |
535 | * ->releasepage() via shrink_active_list(), skip those here. | |
536 | */ | |
537 | if (PageDirty(page) || PageWriteback(page)) | |
538 | return 0; | |
539 | iomap_page_release(page); | |
540 | return 1; | |
541 | } | |
542 | EXPORT_SYMBOL_GPL(iomap_releasepage); | |
543 | ||
544 | void | |
545 | iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len) | |
546 | { | |
547 | /* | |
548 | * If we are invalidating the entire page, clear the dirty state from it | |
549 | * and release it to avoid unnecessary buildup of the LRU. | |
550 | */ | |
551 | if (offset == 0 && len == PAGE_SIZE) { | |
552 | WARN_ON_ONCE(PageWriteback(page)); | |
553 | cancel_dirty_page(page); | |
554 | iomap_page_release(page); | |
555 | } | |
556 | } | |
557 | EXPORT_SYMBOL_GPL(iomap_invalidatepage); | |
558 | ||
559 | #ifdef CONFIG_MIGRATION | |
560 | int | |
561 | iomap_migrate_page(struct address_space *mapping, struct page *newpage, | |
562 | struct page *page, enum migrate_mode mode) | |
563 | { | |
564 | int ret; | |
565 | ||
ab41ee68 | 566 | ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0); |
9dc55f13 CH |
567 | if (ret != MIGRATEPAGE_SUCCESS) |
568 | return ret; | |
569 | ||
570 | if (page_has_private(page)) { | |
571 | ClearPagePrivate(page); | |
572 | set_page_private(newpage, page_private(page)); | |
573 | set_page_private(page, 0); | |
574 | SetPagePrivate(newpage); | |
575 | } | |
576 | ||
577 | if (mode != MIGRATE_SYNC_NO_COPY) | |
578 | migrate_page_copy(newpage, page); | |
579 | else | |
580 | migrate_page_states(newpage, page); | |
581 | return MIGRATEPAGE_SUCCESS; | |
582 | } | |
583 | EXPORT_SYMBOL_GPL(iomap_migrate_page); | |
584 | #endif /* CONFIG_MIGRATION */ | |
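/*
 * Illustrative sketch of how the page cache helpers above are typically
 * plugged into a filesystem's address_space_operations.  The field names
 * come from struct address_space_operations; the table itself and the
 * example_* handlers are hypothetical.
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage		= example_readpage,
 *		.readpages		= example_readpages,
 *		.set_page_dirty		= iomap_set_page_dirty,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.migratepage		= iomap_migrate_page,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *	};
 */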
585 | ||
ae259a9c CH |
586 | static void |
587 | iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) | |
588 | { | |
589 | loff_t i_size = i_size_read(inode); | |
590 | ||
591 | /* | |
592 | * Only truncate newly allocated pages beyond EOF, even if the | |
593 | * write started inside the existing inode size. | |
594 | */ | |
595 | if (pos + len > i_size) | |
596 | truncate_pagecache_range(inode, max(pos, i_size), pos + len); | |
597 | } | |
598 | ||
c03cea42 CH |
599 | static int |
600 | iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page, | |
601 | unsigned poff, unsigned plen, unsigned from, unsigned to, | |
602 | struct iomap *iomap) | |
603 | { | |
604 | struct bio_vec bvec; | |
605 | struct bio bio; | |
606 | ||
607 | if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) { | |
608 | zero_user_segments(page, poff, from, to, poff + plen); | |
9dc55f13 | 609 | iomap_set_range_uptodate(page, poff, plen); |
c03cea42 CH |
610 | return 0; |
611 | } | |
612 | ||
613 | bio_init(&bio, &bvec, 1); | |
614 | bio.bi_opf = REQ_OP_READ; | |
615 | bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); | |
616 | bio_set_dev(&bio, iomap->bdev); | |
617 | __bio_add_page(&bio, page, plen, poff); | |
618 | return submit_bio_wait(&bio); | |
619 | } | |
620 | ||
621 | static int | |
622 | __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, | |
623 | struct page *page, struct iomap *iomap) | |
624 | { | |
9dc55f13 | 625 | struct iomap_page *iop = iomap_page_create(inode, page); |
c03cea42 CH |
626 | loff_t block_size = i_blocksize(inode); |
627 | loff_t block_start = pos & ~(block_size - 1); | |
628 | loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1); | |
10259de1 | 629 | unsigned from = offset_in_page(pos), to = from + len, poff, plen; |
9dc55f13 | 630 | int status = 0; |
c03cea42 CH |
631 | |
632 | if (PageUptodate(page)) | |
633 | return 0; | |
9dc55f13 CH |
634 | |
635 | do { | |
636 | iomap_adjust_read_range(inode, iop, &block_start, | |
637 | block_end - block_start, &poff, &plen); | |
638 | if (plen == 0) | |
639 | break; | |
640 | ||
641 | if ((from > poff && from < poff + plen) || | |
642 | (to > poff && to < poff + plen)) { | |
643 | status = iomap_read_page_sync(inode, block_start, page, | |
644 | poff, plen, from, to, iomap); | |
645 | if (status) | |
646 | break; | |
647 | } | |
648 | ||
649 | } while ((block_start += plen) < block_end); | |
650 | ||
651 | return status; | |
c03cea42 CH |
652 | } |
653 | ||
ae259a9c CH |
654 | static int |
655 | iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, | |
656 | struct page **pagep, struct iomap *iomap) | |
657 | { | |
658 | pgoff_t index = pos >> PAGE_SHIFT; | |
659 | struct page *page; | |
660 | int status = 0; | |
661 | ||
662 | BUG_ON(pos + len > iomap->offset + iomap->length); | |
663 | ||
d1908f52 MH |
664 | if (fatal_signal_pending(current)) |
665 | return -EINTR; | |
666 | ||
ae259a9c CH |
667 | page = grab_cache_page_write_begin(inode->i_mapping, index, flags); |
668 | if (!page) | |
669 | return -ENOMEM; | |
670 | ||
19e0c58f AG |
671 | if (iomap->type == IOMAP_INLINE) |
672 | iomap_read_inline_data(inode, page, iomap); | |
c03cea42 | 673 | else if (iomap->flags & IOMAP_F_BUFFER_HEAD) |
19e0c58f | 674 | status = __block_write_begin_int(page, pos, len, NULL, iomap); |
c03cea42 CH |
675 | else |
676 | status = __iomap_write_begin(inode, pos, len, page, iomap); | |
ae259a9c CH |
677 | if (unlikely(status)) { |
678 | unlock_page(page); | |
679 | put_page(page); | |
680 | page = NULL; | |
681 | ||
682 | iomap_write_failed(inode, pos, len); | |
683 | } | |
684 | ||
685 | *pagep = page; | |
686 | return status; | |
687 | } | |
688 | ||
c03cea42 CH |
689 | int |
690 | iomap_set_page_dirty(struct page *page) | |
691 | { | |
692 | struct address_space *mapping = page_mapping(page); | |
693 | int newly_dirty; | |
694 | ||
695 | if (unlikely(!mapping)) | |
696 | return !TestSetPageDirty(page); | |
697 | ||
698 | /* | |
699 | * Lock out page->mem_cgroup migration to keep PageDirty | |
700 | * synchronized with per-memcg dirty page counters. | |
701 | */ | |
702 | lock_page_memcg(page); | |
703 | newly_dirty = !TestSetPageDirty(page); | |
704 | if (newly_dirty) | |
705 | __set_page_dirty(page, mapping, 0); | |
706 | unlock_page_memcg(page); | |
707 | ||
708 | if (newly_dirty) | |
709 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | |
710 | return newly_dirty; | |
711 | } | |
712 | EXPORT_SYMBOL_GPL(iomap_set_page_dirty); | |
713 | ||
714 | static int | |
715 | __iomap_write_end(struct inode *inode, loff_t pos, unsigned len, | |
716 | unsigned copied, struct page *page, struct iomap *iomap) | |
717 | { | |
718 | flush_dcache_page(page); | |
719 | ||
720 | /* | |
721 | * The blocks that were entirely written will now be uptodate, so we | |
722 | * don't have to worry about a readpage reading them and overwriting a | |
723 | * partial write. However if we have encountered a short write and only | |
724 | * partially written into a block, it will not be marked uptodate, so a | |
725 | * readpage might come in and destroy our partial write. | |
726 | * | |
727 | * Do the simplest thing, and just treat any short write to a non | |
728 | * uptodate page as a zero-length write, and force the caller to redo | |
729 | * the whole thing. | |
730 | */ | |
731 | if (unlikely(copied < len && !PageUptodate(page))) { | |
732 | copied = 0; | |
733 | } else { | |
10259de1 | 734 | iomap_set_range_uptodate(page, offset_in_page(pos), len); |
c03cea42 CH |
735 | iomap_set_page_dirty(page); |
736 | } | |
737 | return __generic_write_end(inode, pos, copied, page); | |
738 | } | |
739 | ||
19e0c58f AG |
740 | static int |
741 | iomap_write_end_inline(struct inode *inode, struct page *page, | |
742 | struct iomap *iomap, loff_t pos, unsigned copied) | |
743 | { | |
744 | void *addr; | |
745 | ||
746 | WARN_ON_ONCE(!PageUptodate(page)); | |
747 | BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
748 | ||
749 | addr = kmap_atomic(page); | |
750 | memcpy(iomap->inline_data + pos, addr + pos, copied); | |
751 | kunmap_atomic(addr); | |
752 | ||
753 | mark_inode_dirty(inode); | |
754 | __generic_write_end(inode, pos, copied, page); | |
755 | return copied; | |
756 | } | |
757 | ||
ae259a9c CH |
758 | static int |
759 | iomap_write_end(struct inode *inode, loff_t pos, unsigned len, | |
19e0c58f | 760 | unsigned copied, struct page *page, struct iomap *iomap) |
ae259a9c CH |
761 | { |
762 | int ret; | |
763 | ||
19e0c58f AG |
764 | if (iomap->type == IOMAP_INLINE) { |
765 | ret = iomap_write_end_inline(inode, page, iomap, pos, copied); | |
c03cea42 | 766 | } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) { |
19e0c58f AG |
767 | ret = generic_write_end(NULL, inode->i_mapping, pos, len, |
768 | copied, page, NULL); | |
c03cea42 CH |
769 | } else { |
770 | ret = __iomap_write_end(inode, pos, len, copied, page, iomap); | |
19e0c58f AG |
771 | } |
772 | ||
63899c6f CH |
773 | if (iomap->page_done) |
774 | iomap->page_done(inode, pos, copied, page, iomap); | |
775 | ||
ae259a9c CH |
776 | if (ret < len) |
777 | iomap_write_failed(inode, pos, len); | |
778 | return ret; | |
779 | } | |
780 | ||
781 | static loff_t | |
782 | iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
783 | struct iomap *iomap) | |
784 | { | |
785 | struct iov_iter *i = data; | |
786 | long status = 0; | |
787 | ssize_t written = 0; | |
788 | unsigned int flags = AOP_FLAG_NOFS; | |
789 | ||
ae259a9c CH |
790 | do { |
791 | struct page *page; | |
792 | unsigned long offset; /* Offset into pagecache page */ | |
793 | unsigned long bytes; /* Bytes to write to page */ | |
794 | size_t copied; /* Bytes copied from user */ | |
795 | ||
10259de1 | 796 | offset = offset_in_page(pos); |
ae259a9c CH |
797 | bytes = min_t(unsigned long, PAGE_SIZE - offset, |
798 | iov_iter_count(i)); | |
799 | again: | |
800 | if (bytes > length) | |
801 | bytes = length; | |
802 | ||
803 | /* | |
804 | * Bring in the user page that we will copy from _first_. | |
805 | * Otherwise there's a nasty deadlock on copying from the | |
806 | * same page as we're writing to, without it being marked | |
807 | * up-to-date. | |
808 | * | |
809 | * Not only is this an optimisation, but it is also required | |
810 | * to check that the address is actually valid, when atomic | |
811 | * usercopies are used, below. | |
812 | */ | |
813 | if (unlikely(iov_iter_fault_in_readable(i, bytes))) { | |
814 | status = -EFAULT; | |
815 | break; | |
816 | } | |
817 | ||
818 | status = iomap_write_begin(inode, pos, bytes, flags, &page, | |
819 | iomap); | |
820 | if (unlikely(status)) | |
821 | break; | |
822 | ||
823 | if (mapping_writably_mapped(inode->i_mapping)) | |
824 | flush_dcache_page(page); | |
825 | ||
ae259a9c | 826 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); |
ae259a9c CH |
827 | |
828 | flush_dcache_page(page); | |
ae259a9c | 829 | |
19e0c58f AG |
830 | status = iomap_write_end(inode, pos, bytes, copied, page, |
831 | iomap); | |
ae259a9c CH |
832 | if (unlikely(status < 0)) |
833 | break; | |
834 | copied = status; | |
835 | ||
836 | cond_resched(); | |
837 | ||
838 | iov_iter_advance(i, copied); | |
839 | if (unlikely(copied == 0)) { | |
840 | /* | |
841 | * If we were unable to copy any data at all, we must | |
842 | * fall back to a single segment length write. | |
843 | * | |
844 | * If we didn't fallback here, we could livelock | |
845 | * because not all segments in the iov can be copied at | |
846 | * once without a pagefault. | |
847 | */ | |
848 | bytes = min_t(unsigned long, PAGE_SIZE - offset, | |
849 | iov_iter_single_seg_count(i)); | |
850 | goto again; | |
851 | } | |
852 | pos += copied; | |
853 | written += copied; | |
854 | length -= copied; | |
855 | ||
856 | balance_dirty_pages_ratelimited(inode->i_mapping); | |
857 | } while (iov_iter_count(i) && length); | |
858 | ||
859 | return written ? written : status; | |
860 | } | |
861 | ||
862 | ssize_t | |
863 | iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter, | |
8ff6daa1 | 864 | const struct iomap_ops *ops) |
ae259a9c CH |
865 | { |
866 | struct inode *inode = iocb->ki_filp->f_mapping->host; | |
867 | loff_t pos = iocb->ki_pos, ret = 0, written = 0; | |
868 | ||
869 | while (iov_iter_count(iter)) { | |
870 | ret = iomap_apply(inode, pos, iov_iter_count(iter), | |
871 | IOMAP_WRITE, ops, iter, iomap_write_actor); | |
872 | if (ret <= 0) | |
873 | break; | |
874 | pos += ret; | |
875 | written += ret; | |
876 | } | |
877 | ||
878 | return written ? written : ret; | |
879 | } | |
880 | EXPORT_SYMBOL_GPL(iomap_file_buffered_write); | |
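/*
 * Usage sketch (illustrative only): a filesystem's buffered write path
 * typically calls this with the inode locked, advances the iocb position
 * itself and then syncs if required.  The locking details and
 * "example_iomap_ops" are placeholders.
 *
 *	inode_lock(inode);
 *	ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
 *	inode_unlock(inode);
 *	if (ret > 0) {
 *		iocb->ki_pos += ret;
 *		ret = generic_write_sync(iocb, ret);
 *	}
 */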
881 | ||
5f4e5752 CH |
882 | static struct page * |
883 | __iomap_read_page(struct inode *inode, loff_t offset) | |
884 | { | |
885 | struct address_space *mapping = inode->i_mapping; | |
886 | struct page *page; | |
887 | ||
888 | page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL); | |
889 | if (IS_ERR(page)) | |
890 | return page; | |
891 | if (!PageUptodate(page)) { | |
892 | put_page(page); | |
893 | return ERR_PTR(-EIO); | |
894 | } | |
895 | return page; | |
896 | } | |
897 | ||
898 | static loff_t | |
899 | iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
900 | struct iomap *iomap) | |
901 | { | |
902 | long status = 0; | |
903 | ssize_t written = 0; | |
904 | ||
905 | do { | |
906 | struct page *page, *rpage; | |
907 | unsigned long offset; /* Offset into pagecache page */ | |
908 | unsigned long bytes; /* Bytes to write to page */ | |
909 | ||
10259de1 | 910 | offset = offset_in_page(pos); |
e28ae8e4 | 911 | bytes = min_t(loff_t, PAGE_SIZE - offset, length); |
5f4e5752 CH |
912 | |
913 | rpage = __iomap_read_page(inode, pos); | |
914 | if (IS_ERR(rpage)) | |
915 | return PTR_ERR(rpage); | |
916 | ||
917 | status = iomap_write_begin(inode, pos, bytes, | |
c718a975 | 918 | AOP_FLAG_NOFS, &page, iomap); |
5f4e5752 CH |
919 | put_page(rpage); |
920 | if (unlikely(status)) | |
921 | return status; | |
922 | ||
923 | WARN_ON_ONCE(!PageUptodate(page)); | |
924 | ||
19e0c58f | 925 | status = iomap_write_end(inode, pos, bytes, bytes, page, iomap); |
5f4e5752 CH |
926 | if (unlikely(status <= 0)) { |
927 | if (WARN_ON_ONCE(status == 0)) | |
928 | return -EIO; | |
929 | return status; | |
930 | } | |
931 | ||
932 | cond_resched(); | |
933 | ||
934 | pos += status; | |
935 | written += status; | |
936 | length -= status; | |
937 | ||
938 | balance_dirty_pages_ratelimited(inode->i_mapping); | |
939 | } while (length); | |
940 | ||
941 | return written; | |
942 | } | |
943 | ||
944 | int | |
945 | iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len, | |
8ff6daa1 | 946 | const struct iomap_ops *ops) |
5f4e5752 CH |
947 | { |
948 | loff_t ret; | |
949 | ||
950 | while (len) { | |
951 | ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL, | |
952 | iomap_dirty_actor); | |
953 | if (ret <= 0) | |
954 | return ret; | |
955 | pos += ret; | |
956 | len -= ret; | |
957 | } | |
958 | ||
959 | return 0; | |
960 | } | |
961 | EXPORT_SYMBOL_GPL(iomap_file_dirty); | |
962 | ||
ae259a9c CH |
963 | static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, |
964 | unsigned bytes, struct iomap *iomap) | |
965 | { | |
966 | struct page *page; | |
967 | int status; | |
968 | ||
c718a975 TH |
969 | status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page, |
970 | iomap); | |
ae259a9c CH |
971 | if (status) |
972 | return status; | |
973 | ||
974 | zero_user(page, offset, bytes); | |
975 | mark_page_accessed(page); | |
976 | ||
19e0c58f | 977 | return iomap_write_end(inode, pos, bytes, bytes, page, iomap); |
ae259a9c CH |
978 | } |
979 | ||
9a286f0e CH |
980 | static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, |
981 | struct iomap *iomap) | |
982 | { | |
57fc505d CH |
983 | return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, |
984 | iomap_sector(iomap, pos & PAGE_MASK), offset, bytes); | |
9a286f0e CH |
985 | } |
986 | ||
ae259a9c CH |
987 | static loff_t |
988 | iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count, | |
989 | void *data, struct iomap *iomap) | |
990 | { | |
991 | bool *did_zero = data; | |
992 | loff_t written = 0; | |
993 | int status; | |
994 | ||
995 | /* already zeroed? we're done. */ | |
996 | if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) | |
997 | return count; | |
998 | ||
999 | do { | |
1000 | unsigned offset, bytes; | |
1001 | ||
10259de1 | 1002 | offset = offset_in_page(pos); |
e28ae8e4 | 1003 | bytes = min_t(loff_t, PAGE_SIZE - offset, count); |
ae259a9c | 1004 | |
9a286f0e CH |
1005 | if (IS_DAX(inode)) |
1006 | status = iomap_dax_zero(pos, offset, bytes, iomap); | |
1007 | else | |
1008 | status = iomap_zero(inode, pos, offset, bytes, iomap); | |
ae259a9c CH |
1009 | if (status < 0) |
1010 | return status; | |
1011 | ||
1012 | pos += bytes; | |
1013 | count -= bytes; | |
1014 | written += bytes; | |
1015 | if (did_zero) | |
1016 | *did_zero = true; | |
1017 | } while (count > 0); | |
1018 | ||
1019 | return written; | |
1020 | } | |
1021 | ||
1022 | int | |
1023 | iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, | |
8ff6daa1 | 1024 | const struct iomap_ops *ops) |
ae259a9c CH |
1025 | { |
1026 | loff_t ret; | |
1027 | ||
1028 | while (len > 0) { | |
1029 | ret = iomap_apply(inode, pos, len, IOMAP_ZERO, | |
1030 | ops, did_zero, iomap_zero_range_actor); | |
1031 | if (ret <= 0) | |
1032 | return ret; | |
1033 | ||
1034 | pos += ret; | |
1035 | len -= ret; | |
1036 | } | |
1037 | ||
1038 | return 0; | |
1039 | } | |
1040 | EXPORT_SYMBOL_GPL(iomap_zero_range); | |
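/*
 * Usage sketch (illustrative only): extending-truncate paths commonly use
 * this to zero the bytes between the old and the new EOF so stale block
 * contents are never exposed; "example_iomap_ops" is a placeholder.
 *
 *	error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *			&did_zero, &example_iomap_ops);
 */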
1041 | ||
1042 | int | |
1043 | iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, | |
8ff6daa1 | 1044 | const struct iomap_ops *ops) |
ae259a9c | 1045 | { |
93407472 FF |
1046 | unsigned int blocksize = i_blocksize(inode); |
1047 | unsigned int off = pos & (blocksize - 1); | |
ae259a9c CH |
1048 | |
1049 | /* Block boundary? Nothing to do */ | |
1050 | if (!off) | |
1051 | return 0; | |
1052 | return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); | |
1053 | } | |
1054 | EXPORT_SYMBOL_GPL(iomap_truncate_page); | |
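/*
 * Worked example (illustrative): with a 4096-byte block size and pos ==
 * 10000, off is 10000 & 4095 == 1808, so the call above zeroes the 2288
 * bytes from offset 10000 up to the next block boundary at 12288.
 */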
1055 | ||
1056 | static loff_t | |
1057 | iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, | |
1058 | void *data, struct iomap *iomap) | |
1059 | { | |
1060 | struct page *page = data; | |
1061 | int ret; | |
1062 | ||
c03cea42 CH |
1063 | if (iomap->flags & IOMAP_F_BUFFER_HEAD) { |
1064 | ret = __block_write_begin_int(page, pos, length, NULL, iomap); | |
1065 | if (ret) | |
1066 | return ret; | |
1067 | block_commit_write(page, 0, length); | |
1068 | } else { | |
1069 | WARN_ON_ONCE(!PageUptodate(page)); | |
9dc55f13 | 1070 | iomap_page_create(inode, page); |
561295a3 | 1071 | set_page_dirty(page); |
c03cea42 | 1072 | } |
ae259a9c | 1073 | |
ae259a9c CH |
1074 | return length; |
1075 | } | |
1076 | ||
5780a02f | 1077 | vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) |
ae259a9c CH |
1078 | { |
1079 | struct page *page = vmf->page; | |
11bac800 | 1080 | struct inode *inode = file_inode(vmf->vma->vm_file); |
ae259a9c CH |
1081 | unsigned long length; |
1082 | loff_t offset, size; | |
1083 | ssize_t ret; | |
1084 | ||
1085 | lock_page(page); | |
1086 | size = i_size_read(inode); | |
1087 | if ((page->mapping != inode->i_mapping) || | |
1088 | (page_offset(page) > size)) { | |
1089 | /* We overload EFAULT to mean page got truncated */ | |
1090 | ret = -EFAULT; | |
1091 | goto out_unlock; | |
1092 | } | |
1093 | ||
1094 | /* page is wholly or partially inside EOF */ | |
1095 | if (((page->index + 1) << PAGE_SHIFT) > size) | |
10259de1 | 1096 | length = offset_in_page(size); |
ae259a9c CH |
1097 | else |
1098 | length = PAGE_SIZE; | |
1099 | ||
1100 | offset = page_offset(page); | |
1101 | while (length > 0) { | |
9484ab1b JK |
1102 | ret = iomap_apply(inode, offset, length, |
1103 | IOMAP_WRITE | IOMAP_FAULT, ops, page, | |
1104 | iomap_page_mkwrite_actor); | |
ae259a9c CH |
1105 | if (unlikely(ret <= 0)) |
1106 | goto out_unlock; | |
1107 | offset += ret; | |
1108 | length -= ret; | |
1109 | } | |
1110 | ||
ae259a9c | 1111 | wait_for_stable_page(page); |
e7647fb4 | 1112 | return VM_FAULT_LOCKED; |
ae259a9c CH |
1113 | out_unlock: |
1114 | unlock_page(page); | |
e7647fb4 | 1115 | return block_page_mkwrite_return(ret); |
ae259a9c CH |
1116 | } |
1117 | EXPORT_SYMBOL_GPL(iomap_page_mkwrite); | |
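/*
 * Usage sketch (illustrative only): a filesystem's vm_operations_struct
 * ->page_mkwrite handler built on the helper above.  The freeze protection
 * shown is the caller's responsibility and the names are placeholders.
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */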
8be9f564 CH |
1118 | |
1119 | struct fiemap_ctx { | |
1120 | struct fiemap_extent_info *fi; | |
1121 | struct iomap prev; | |
1122 | }; | |
1123 | ||
1124 | static int iomap_to_fiemap(struct fiemap_extent_info *fi, | |
1125 | struct iomap *iomap, u32 flags) | |
1126 | { | |
1127 | switch (iomap->type) { | |
1128 | case IOMAP_HOLE: | |
1129 | /* skip holes */ | |
1130 | return 0; | |
1131 | case IOMAP_DELALLOC: | |
1132 | flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN; | |
1133 | break; | |
19319b53 CH |
1134 | case IOMAP_MAPPED: |
1135 | break; | |
8be9f564 CH |
1136 | case IOMAP_UNWRITTEN: |
1137 | flags |= FIEMAP_EXTENT_UNWRITTEN; | |
1138 | break; | |
19319b53 CH |
1139 | case IOMAP_INLINE: |
1140 | flags |= FIEMAP_EXTENT_DATA_INLINE; | |
8be9f564 CH |
1141 | break; |
1142 | } | |
1143 | ||
17de0a9f CH |
1144 | if (iomap->flags & IOMAP_F_MERGED) |
1145 | flags |= FIEMAP_EXTENT_MERGED; | |
e43c460d DW |
1146 | if (iomap->flags & IOMAP_F_SHARED) |
1147 | flags |= FIEMAP_EXTENT_SHARED; | |
17de0a9f | 1148 | |
8be9f564 | 1149 | return fiemap_fill_next_extent(fi, iomap->offset, |
19fe5f64 | 1150 | iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0, |
17de0a9f | 1151 | iomap->length, flags); |
8be9f564 CH |
1152 | } |
1153 | ||
1154 | static loff_t | |
1155 | iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
1156 | struct iomap *iomap) | |
1157 | { | |
1158 | struct fiemap_ctx *ctx = data; | |
1159 | loff_t ret = length; | |
1160 | ||
1161 | if (iomap->type == IOMAP_HOLE) | |
1162 | return length; | |
1163 | ||
1164 | ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0); | |
1165 | ctx->prev = *iomap; | |
1166 | switch (ret) { | |
1167 | case 0: /* success */ | |
1168 | return length; | |
1169 | case 1: /* extent array full */ | |
1170 | return 0; | |
1171 | default: | |
1172 | return ret; | |
1173 | } | |
1174 | } | |
1175 | ||
1176 | int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, | |
8ff6daa1 | 1177 | loff_t start, loff_t len, const struct iomap_ops *ops) |
8be9f564 CH |
1178 | { |
1179 | struct fiemap_ctx ctx; | |
1180 | loff_t ret; | |
1181 | ||
1182 | memset(&ctx, 0, sizeof(ctx)); | |
1183 | ctx.fi = fi; | |
1184 | ctx.prev.type = IOMAP_HOLE; | |
1185 | ||
1186 | ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC); | |
1187 | if (ret) | |
1188 | return ret; | |
1189 | ||
8896b8f6 DC |
1190 | if (fi->fi_flags & FIEMAP_FLAG_SYNC) { |
1191 | ret = filemap_write_and_wait(inode->i_mapping); | |
1192 | if (ret) | |
1193 | return ret; | |
1194 | } | |
8be9f564 CH |
1195 | |
1196 | while (len > 0) { | |
d33fd776 | 1197 | ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx, |
8be9f564 | 1198 | iomap_fiemap_actor); |
ac2dc058 DC |
1199 | /* inode with no (attribute) mapping will give ENOENT */ |
1200 | if (ret == -ENOENT) | |
1201 | break; | |
8be9f564 CH |
1202 | if (ret < 0) |
1203 | return ret; | |
1204 | if (ret == 0) | |
1205 | break; | |
1206 | ||
1207 | start += ret; | |
1208 | len -= ret; | |
1209 | } | |
1210 | ||
1211 | if (ctx.prev.type != IOMAP_HOLE) { | |
1212 | ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST); | |
1213 | if (ret < 0) | |
1214 | return ret; | |
1215 | } | |
1216 | ||
1217 | return 0; | |
1218 | } | |
1219 | EXPORT_SYMBOL_GPL(iomap_fiemap); | |
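/*
 * Usage sketch (illustrative only): a filesystem's ->fiemap inode operation
 * can simply forward to the helper above; "example_iomap_ops" is a
 * placeholder.
 *
 *	static int example_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len,
 *				&example_iomap_ops);
 *	}
 */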
ff6a9292 | 1220 | |
8a78cb1f CH |
1221 | /* |
1222 | * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff. | |
afd9d6a1 | 1223 | * Returns true if found and updates @lastoff to the offset in file. |
8a78cb1f | 1224 | */ |
afd9d6a1 CH |
1225 | static bool |
1226 | page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff, | |
1227 | int whence) | |
8a78cb1f | 1228 | { |
afd9d6a1 CH |
1229 | const struct address_space_operations *ops = inode->i_mapping->a_ops; |
1230 | unsigned int bsize = i_blocksize(inode), off; | |
8a78cb1f | 1231 | bool seek_data = whence == SEEK_DATA; |
afd9d6a1 | 1232 | loff_t poff = page_offset(page); |
8a78cb1f | 1233 | |
afd9d6a1 CH |
1234 | if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE)) |
1235 | return false; | |
8a78cb1f | 1236 | |
afd9d6a1 | 1237 | if (*lastoff < poff) { |
8a78cb1f | 1238 | /* |
afd9d6a1 CH |
1239 | * Last offset smaller than the start of the page means we found |
1240 | * a hole: | |
8a78cb1f | 1241 | */ |
afd9d6a1 CH |
1242 | if (whence == SEEK_HOLE) |
1243 | return true; | |
1244 | *lastoff = poff; | |
1245 | } | |
8a78cb1f | 1246 | |
afd9d6a1 CH |
1247 | /* |
1248 | * Just check the page unless we can and should check block ranges: | |
1249 | */ | |
1250 | if (bsize == PAGE_SIZE || !ops->is_partially_uptodate) | |
1251 | return PageUptodate(page) == seek_data; | |
1252 | ||
1253 | lock_page(page); | |
1254 | if (unlikely(page->mapping != inode->i_mapping)) | |
1255 | goto out_unlock_not_found; | |
1256 | ||
1257 | for (off = 0; off < PAGE_SIZE; off += bsize) { | |
10259de1 | 1258 | if (offset_in_page(*lastoff) >= off + bsize) |
afd9d6a1 CH |
1259 | continue; |
1260 | if (ops->is_partially_uptodate(page, off, bsize) == seek_data) { | |
1261 | unlock_page(page); | |
1262 | return true; | |
1263 | } | |
1264 | *lastoff = poff + off + bsize; | |
1265 | } | |
1266 | ||
1267 | out_unlock_not_found: | |
1268 | unlock_page(page); | |
1269 | return false; | |
8a78cb1f CH |
1270 | } |
1271 | ||
1272 | /* | |
1273 | * Seek for SEEK_DATA / SEEK_HOLE in the page cache. | |
1274 | * | |
1275 | * Within unwritten extents, the page cache determines which parts are holes | |
bd56b3e1 CH |
1276 | * and which are data: uptodate buffer heads count as data; everything else |
1277 | * counts as a hole. | |
8a78cb1f CH |
1278 | * |
1279 | * Returns the resulting offset on success, and -ENOENT otherwise. | |
1280 | */ | |
1281 | static loff_t | |
1282 | page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length, | |
1283 | int whence) | |
1284 | { | |
1285 | pgoff_t index = offset >> PAGE_SHIFT; | |
1286 | pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE); | |
1287 | loff_t lastoff = offset; | |
1288 | struct pagevec pvec; | |
1289 | ||
1290 | if (length <= 0) | |
1291 | return -ENOENT; | |
1292 | ||
1293 | pagevec_init(&pvec); | |
1294 | ||
1295 | do { | |
1296 | unsigned nr_pages, i; | |
1297 | ||
1298 | nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index, | |
1299 | end - 1); | |
1300 | if (nr_pages == 0) | |
1301 | break; | |
1302 | ||
1303 | for (i = 0; i < nr_pages; i++) { | |
1304 | struct page *page = pvec.pages[i]; | |
1305 | ||
afd9d6a1 | 1306 | if (page_seek_hole_data(inode, page, &lastoff, whence)) |
8a78cb1f | 1307 | goto check_range; |
8a78cb1f CH |
1308 | lastoff = page_offset(page) + PAGE_SIZE; |
1309 | } | |
1310 | pagevec_release(&pvec); | |
1311 | } while (index < end); | |
1312 | ||
1313 | /* If there is no page at lastoff and we are not done, we found a hole. */ | |
1314 | if (whence != SEEK_HOLE) | |
1315 | goto not_found; | |
1316 | ||
1317 | check_range: | |
1318 | if (lastoff < offset + length) | |
1319 | goto out; | |
1320 | not_found: | |
1321 | lastoff = -ENOENT; | |
1322 | out: | |
1323 | pagevec_release(&pvec); | |
1324 | return lastoff; | |
1325 | } | |
1326 | ||
1327 | ||
0ed3b0d4 AG |
1328 | static loff_t |
1329 | iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length, | |
1330 | void *data, struct iomap *iomap) | |
1331 | { | |
1332 | switch (iomap->type) { | |
1333 | case IOMAP_UNWRITTEN: | |
1334 | offset = page_cache_seek_hole_data(inode, offset, length, | |
1335 | SEEK_HOLE); | |
1336 | if (offset < 0) | |
1337 | return length; | |
1338 | /* fall through */ | |
1339 | case IOMAP_HOLE: | |
1340 | *(loff_t *)data = offset; | |
1341 | return 0; | |
1342 | default: | |
1343 | return length; | |
1344 | } | |
1345 | } | |
1346 | ||
1347 | loff_t | |
1348 | iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops) | |
1349 | { | |
1350 | loff_t size = i_size_read(inode); | |
1351 | loff_t length = size - offset; | |
1352 | loff_t ret; | |
1353 | ||
d6ab17f2 DW |
1354 | /* Nothing to be found before or beyond the end of the file. */ |
1355 | if (offset < 0 || offset >= size) | |
0ed3b0d4 AG |
1356 | return -ENXIO; |
1357 | ||
1358 | while (length > 0) { | |
1359 | ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, | |
1360 | &offset, iomap_seek_hole_actor); | |
1361 | if (ret < 0) | |
1362 | return ret; | |
1363 | if (ret == 0) | |
1364 | break; | |
1365 | ||
1366 | offset += ret; | |
1367 | length -= ret; | |
1368 | } | |
1369 | ||
1370 | return offset; | |
1371 | } | |
1372 | EXPORT_SYMBOL_GPL(iomap_seek_hole); | |
1373 | ||
1374 | static loff_t | |
1375 | iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length, | |
1376 | void *data, struct iomap *iomap) | |
1377 | { | |
1378 | switch (iomap->type) { | |
1379 | case IOMAP_HOLE: | |
1380 | return length; | |
1381 | case IOMAP_UNWRITTEN: | |
1382 | offset = page_cache_seek_hole_data(inode, offset, length, | |
1383 | SEEK_DATA); | |
1384 | if (offset < 0) | |
1385 | return length; | |
1386 | /*FALLTHRU*/ | |
1387 | default: | |
1388 | *(loff_t *)data = offset; | |
1389 | return 0; | |
1390 | } | |
1391 | } | |
1392 | ||
1393 | loff_t | |
1394 | iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops) | |
1395 | { | |
1396 | loff_t size = i_size_read(inode); | |
1397 | loff_t length = size - offset; | |
1398 | loff_t ret; | |
1399 | ||
d6ab17f2 DW |
1400 | /* Nothing to be found before or beyond the end of the file. */ |
1401 | if (offset < 0 || offset >= size) | |
0ed3b0d4 AG |
1402 | return -ENXIO; |
1403 | ||
1404 | while (length > 0) { | |
1405 | ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, | |
1406 | &offset, iomap_seek_data_actor); | |
1407 | if (ret < 0) | |
1408 | return ret; | |
1409 | if (ret == 0) | |
1410 | break; | |
1411 | ||
1412 | offset += ret; | |
1413 | length -= ret; | |
1414 | } | |
1415 | ||
1416 | if (length <= 0) | |
1417 | return -ENXIO; | |
1418 | return offset; | |
1419 | } | |
1420 | EXPORT_SYMBOL_GPL(iomap_seek_data); | |
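/*
 * Usage sketch (illustrative only): a filesystem's ->llseek handler can
 * dispatch SEEK_HOLE/SEEK_DATA to the two helpers above and fall back to
 * generic_file_llseek() for the other whence values; names other than the
 * exported helpers are placeholders.
 *
 *	static loff_t example_llseek(struct file *file, loff_t offset,
 *			int whence)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&example_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&example_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */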
1421 | ||
ff6a9292 CH |
1422 | /* |
1423 | * Private flags for iomap_dio, must not overlap with the public ones in | |
1424 | * iomap.h: | |
1425 | */ | |
3460cac1 | 1426 | #define IOMAP_DIO_WRITE_FUA (1 << 28) |
4f8ff44b | 1427 | #define IOMAP_DIO_NEED_SYNC (1 << 29) |
ff6a9292 CH |
1428 | #define IOMAP_DIO_WRITE (1 << 30) |
1429 | #define IOMAP_DIO_DIRTY (1 << 31) | |
1430 | ||
1431 | struct iomap_dio { | |
1432 | struct kiocb *iocb; | |
1433 | iomap_dio_end_io_t *end_io; | |
1434 | loff_t i_size; | |
1435 | loff_t size; | |
1436 | atomic_t ref; | |
1437 | unsigned flags; | |
1438 | int error; | |
ebf00be3 | 1439 | bool wait_for_completion; |
ff6a9292 CH |
1440 | |
1441 | union { | |
1442 | /* used during submission and for synchronous completion: */ | |
1443 | struct { | |
1444 | struct iov_iter *iter; | |
1445 | struct task_struct *waiter; | |
1446 | struct request_queue *last_queue; | |
1447 | blk_qc_t cookie; | |
1448 | } submit; | |
1449 | ||
1450 | /* used for aio completion: */ | |
1451 | struct { | |
1452 | struct work_struct work; | |
1453 | } aio; | |
1454 | }; | |
1455 | }; | |
1456 | ||
1457 | static ssize_t iomap_dio_complete(struct iomap_dio *dio) | |
1458 | { | |
1459 | struct kiocb *iocb = dio->iocb; | |
332391a9 | 1460 | struct inode *inode = file_inode(iocb->ki_filp); |
5e25c269 | 1461 | loff_t offset = iocb->ki_pos; |
ff6a9292 CH |
1462 | ssize_t ret; |
1463 | ||
1464 | if (dio->end_io) { | |
1465 | ret = dio->end_io(iocb, | |
1466 | dio->error ? dio->error : dio->size, | |
1467 | dio->flags); | |
1468 | } else { | |
1469 | ret = dio->error; | |
1470 | } | |
1471 | ||
1472 | if (likely(!ret)) { | |
1473 | ret = dio->size; | |
1474 | /* check for short read */ | |
5e25c269 | 1475 | if (offset + ret > dio->i_size && |
ff6a9292 | 1476 | !(dio->flags & IOMAP_DIO_WRITE)) |
5e25c269 | 1477 | ret = dio->i_size - offset; |
ff6a9292 CH |
1478 | iocb->ki_pos += ret; |
1479 | } | |
1480 | ||
5e25c269 EG |
1481 | /* |
1482 | * Try again to invalidate clean pages which might have been cached by | |
1483 | * non-direct readahead, or faulted in by get_user_pages() if the source | |
1484 | * of the write was an mmap'ed region of the file we're writing. Either | |
1485 | * one is a pretty crazy thing to do, so we don't support it 100%. If | |
1486 | * this invalidation fails, tough, the write still worked... | |
1487 | * | |
1488 | * And this page cache invalidation has to be after dio->end_io(), as | |
1489 | * some filesystems convert unwritten extents to real allocations in | |
1490 | * end_io() when necessary, otherwise a racing buffer read would cache | |
1491 | * zeros from unwritten extents. | |
1492 | */ | |
1493 | if (!dio->error && | |
1494 | (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { | |
1495 | int err; | |
1496 | err = invalidate_inode_pages2_range(inode->i_mapping, | |
1497 | offset >> PAGE_SHIFT, | |
1498 | (offset + dio->size - 1) >> PAGE_SHIFT); | |
5a9d929d DW |
1499 | if (err) |
1500 | dio_warn_stale_pagecache(iocb->ki_filp); | |
5e25c269 EG |
1501 | } |
1502 | ||
4f8ff44b DC |
1503 | /* |
1504 | * If this is a DSYNC write, make sure we push it to stable storage now | |
1505 | * that we've written data. | |
1506 | */ | |
1507 | if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC)) | |
1508 | ret = generic_write_sync(iocb, ret); | |
1509 | ||
ff6a9292 CH |
1510 | inode_dio_end(file_inode(iocb->ki_filp)); |
1511 | kfree(dio); | |
1512 | ||
1513 | return ret; | |
1514 | } | |
1515 | ||
1516 | static void iomap_dio_complete_work(struct work_struct *work) | |
1517 | { | |
1518 | struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work); | |
1519 | struct kiocb *iocb = dio->iocb; | |
ff6a9292 | 1520 | |
4f8ff44b | 1521 | iocb->ki_complete(iocb, iomap_dio_complete(dio), 0); |
ff6a9292 CH |
1522 | } |
1523 | ||
1524 | /* | |
1525 | * Set an error in the dio if none is set yet. We have to use cmpxchg | |
1526 | * as the submission context and the completion context(s) can race to | |
1527 | * update the error. | |
1528 | */ | |
1529 | static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret) | |
1530 | { | |
1531 | cmpxchg(&dio->error, 0, ret); | |
1532 | } | |
1533 | ||
1534 | static void iomap_dio_bio_end_io(struct bio *bio) | |
1535 | { | |
1536 | struct iomap_dio *dio = bio->bi_private; | |
1537 | bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); | |
1538 | ||
4e4cbee9 CH |
1539 | if (bio->bi_status) |
1540 | iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); | |
ff6a9292 CH |
1541 | |
1542 | if (atomic_dec_and_test(&dio->ref)) { | |
ebf00be3 | 1543 | if (dio->wait_for_completion) { |
ff6a9292 | 1544 | struct task_struct *waiter = dio->submit.waiter; |
ff6a9292 | 1545 | WRITE_ONCE(dio->submit.waiter, NULL); |
0619317f | 1546 | blk_wake_io_task(waiter); |
ff6a9292 CH |
1547 | } else if (dio->flags & IOMAP_DIO_WRITE) { |
1548 | struct inode *inode = file_inode(dio->iocb->ki_filp); | |
1549 | ||
1550 | INIT_WORK(&dio->aio.work, iomap_dio_complete_work); | |
1551 | queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); | |
1552 | } else { | |
1553 | iomap_dio_complete_work(&dio->aio.work); | |
1554 | } | |
1555 | } | |
1556 | ||
1557 | if (should_dirty) { | |
1558 | bio_check_pages_dirty(bio); | |
1559 | } else { | |
1560 | struct bio_vec *bvec; | |
1561 | int i; | |
1562 | ||
1563 | bio_for_each_segment_all(bvec, bio, i) | |
1564 | put_page(bvec->bv_page); | |
1565 | bio_put(bio); | |
1566 | } | |
1567 | } | |
1568 | ||
1569 | static blk_qc_t | |
1570 | iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, | |
1571 | unsigned len) | |
1572 | { | |
1573 | struct page *page = ZERO_PAGE(0); | |
d1e36282 | 1574 | int flags = REQ_SYNC | REQ_IDLE; |
ff6a9292 CH |
1575 | struct bio *bio; |
1576 | ||
1577 | bio = bio_alloc(GFP_KERNEL, 1); | |
74d46992 | 1578 | bio_set_dev(bio, iomap->bdev); |
57fc505d | 1579 | bio->bi_iter.bi_sector = iomap_sector(iomap, pos); |
ff6a9292 CH |
1580 | bio->bi_private = dio; |
1581 | bio->bi_end_io = iomap_dio_bio_end_io; | |
1582 | ||
d1e36282 JA |
1583 | if (dio->iocb->ki_flags & IOCB_HIPRI) |
1584 | flags |= REQ_HIPRI; | |
1585 | ||
ff6a9292 | 1586 | get_page(page); |
6533b4e4 | 1587 | __bio_add_page(bio, page, len, 0); |
d1e36282 | 1588 | bio_set_op_attrs(bio, REQ_OP_WRITE, flags); |
ff6a9292 CH |
1589 | |
1590 | atomic_inc(&dio->ref); | |
1591 | return submit_bio(bio); | |
1592 | } | |
1593 | ||
1594 | static loff_t | |
09230435 CH |
1595 | iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, |
1596 | struct iomap_dio *dio, struct iomap *iomap) | |
ff6a9292 | 1597 | { |
93407472 FF |
1598 | unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); |
1599 | unsigned int fs_block_size = i_blocksize(inode), pad; | |
1600 | unsigned int align = iov_iter_alignment(dio->submit.iter); | |
ff6a9292 CH |
1601 | struct iov_iter iter; |
1602 | struct bio *bio; | |
1603 | bool need_zeroout = false; | |
3460cac1 | 1604 | bool use_fua = false; |
4721a601 | 1605 | int nr_pages, ret = 0; |
cfe057f7 | 1606 | size_t copied = 0; |
ff6a9292 CH |
1607 | |
1608 | if ((pos | length | align) & ((1 << blkbits) - 1)) | |
1609 | return -EINVAL; | |
1610 | ||
09230435 | 1611 | if (iomap->type == IOMAP_UNWRITTEN) { |
ff6a9292 CH |
1612 | dio->flags |= IOMAP_DIO_UNWRITTEN; |
1613 | need_zeroout = true; | |
09230435 CH |
1614 | } |
1615 | ||
1616 | if (iomap->flags & IOMAP_F_SHARED) | |
1617 | dio->flags |= IOMAP_DIO_COW; | |
1618 | ||
1619 | if (iomap->flags & IOMAP_F_NEW) { | |
1620 | need_zeroout = true; | |
0929d858 | 1621 | } else if (iomap->type == IOMAP_MAPPED) { |
09230435 | 1622 | /* |
0929d858 DC |
1623 | * Use a FUA write if we need datasync semantics, this is a pure |
1624 | * data IO that doesn't require any metadata updates (including | |
1625 | * after IO completion such as unwritten extent conversion) and | |
1626 | * the underlying device supports FUA. This allows us to avoid | |
1627 | * cache flushes on IO completion. | |
09230435 CH |
1628 | */ |
1629 | if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && | |
1630 | (dio->flags & IOMAP_DIO_WRITE_FUA) && | |
1631 | blk_queue_fua(bdev_get_queue(iomap->bdev))) | |
1632 | use_fua = true; | |
ff6a9292 CH |
1633 | } |
1634 | ||
1635 | /* | |
1636 | * Operate on a partial iter trimmed to the extent we were called for. | |
1637 | * We'll update the iter in the dio once we're done with this extent. | |
1638 | */ | |
1639 | iter = *dio->submit.iter; | |
1640 | iov_iter_truncate(&iter, length); | |
1641 | ||
1642 | nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES); | |
1643 | if (nr_pages <= 0) | |
1644 | return nr_pages; | |
1645 | ||
1646 | if (need_zeroout) { | |
1647 | /* zero out from the start of the block to the write offset */ | |
1648 | pad = pos & (fs_block_size - 1); | |
1649 | if (pad) | |
1650 | iomap_dio_zero(dio, iomap, pos - pad, pad); | |
1651 | } | |
1652 | ||
1653 | do { | |
cfe057f7 AV |
1654 | size_t n; |
1655 | if (dio->error) { | |
1656 | iov_iter_revert(dio->submit.iter, copied); | |
ff6a9292 | 1657 | return 0; |
cfe057f7 | 1658 | } |
ff6a9292 CH |
1659 | |
1660 | bio = bio_alloc(GFP_KERNEL, nr_pages); | |
74d46992 | 1661 | bio_set_dev(bio, iomap->bdev); |
57fc505d | 1662 | bio->bi_iter.bi_sector = iomap_sector(iomap, pos); |
45d06cf7 | 1663 | bio->bi_write_hint = dio->iocb->ki_hint; |
087e5669 | 1664 | bio->bi_ioprio = dio->iocb->ki_ioprio; |
ff6a9292 CH |
1665 | bio->bi_private = dio; |
1666 | bio->bi_end_io = iomap_dio_bio_end_io; | |
1667 | ||
1668 | ret = bio_iov_iter_get_pages(bio, &iter); | |
1669 | if (unlikely(ret)) { | |
4721a601 DC |
1670 | /* |
1671 | * We have to stop part way through an IO. We must fall | |
1672 | * through to the sub-block tail zeroing here, otherwise | |
1673 | * this short IO may expose stale data in the tail of | |
1674 | * the block we haven't written data to. | |
1675 | */ | |
ff6a9292 | 1676 | bio_put(bio); |
4721a601 | 1677 | goto zero_tail; |
ff6a9292 CH |
1678 | } |
1679 | ||
cfe057f7 | 1680 | n = bio->bi_iter.bi_size; |
ff6a9292 | 1681 | if (dio->flags & IOMAP_DIO_WRITE) { |
3460cac1 DC |
1682 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; |
1683 | if (use_fua) | |
1684 | bio->bi_opf |= REQ_FUA; | |
1685 | else | |
1686 | dio->flags &= ~IOMAP_DIO_WRITE_FUA; | |
cfe057f7 | 1687 | task_io_account_write(n); |
ff6a9292 | 1688 | } else { |
3460cac1 | 1689 | bio->bi_opf = REQ_OP_READ; |
ff6a9292 CH |
1690 | if (dio->flags & IOMAP_DIO_DIRTY) |
1691 | bio_set_pages_dirty(bio); | |
1692 | } | |
1693 | ||
d1e36282 JA |
1694 | if (dio->iocb->ki_flags & IOCB_HIPRI) |
1695 | bio->bi_opf |= REQ_HIPRI; | |
1696 | ||
cfe057f7 AV |
1697 | iov_iter_advance(dio->submit.iter, n); |
1698 | ||
1699 | dio->size += n; | |
1700 | pos += n; | |
1701 | copied += n; | |
ff6a9292 CH |
1702 | |
1703 | nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES); | |
1704 | ||
1705 | atomic_inc(&dio->ref); | |
1706 | ||
1707 | dio->submit.last_queue = bdev_get_queue(iomap->bdev); | |
1708 | dio->submit.cookie = submit_bio(bio); | |
1709 | } while (nr_pages); | |
1710 | ||
b450672f DC |
1711 | /* |
1712 | * We need to zero out the tail of a sub-block write if the extent type
1713 | * requires zeroing or the write extends beyond EOF. If we don't zero | |
1714 | * the block tail in the latter case, we can expose stale data via mmap | |
1715 | * reads of the EOF block. | |
1716 | */ | |
4721a601 | 1717 | zero_tail: |
b450672f DC |
1718 | if (need_zeroout || |
1719 | ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) { | |
ff6a9292 CH |
1720 | /* zero out from the end of the write to the end of the block */ |
1721 | pad = pos & (fs_block_size - 1); | |
1722 | if (pad) | |
1723 | iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); | |
1724 | } | |
4721a601 | 1725 | return copied ? copied : ret; |
ff6a9292 CH |
1726 | } |
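/*
 * Editor's note (illustrative, not part of the original source): a worked
 * example of the head/tail zeroing above.  Assume a 4096-byte filesystem
 * block, a 512-byte logical block device, and a write of bytes 512..3583
 * into a freshly allocated (IOMAP_F_NEW) block.  The head pad is
 * pos & (fs_block_size - 1) = 512, so the first 512 bytes of the block are
 * zeroed before any data bio is issued.  After the copy loop pos == 3584,
 * so the tail pad is 3584 and the remaining 4096 - 3584 = 512 bytes are
 * zeroed via the zero_tail path.
 */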
1727 | ||
09230435 CH |
1728 | static loff_t |
1729 | iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio) | |
1730 | { | |
1731 | length = iov_iter_zero(length, dio->submit.iter); | |
1732 | dio->size += length; | |
1733 | return length; | |
1734 | } | |
1735 | ||
ec181f67 AG |
1736 | static loff_t |
1737 | iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length, | |
1738 | struct iomap_dio *dio, struct iomap *iomap) | |
1739 | { | |
1740 | struct iov_iter *iter = dio->submit.iter; | |
1741 | size_t copied; | |
1742 | ||
1743 | BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
1744 | ||
1745 | if (dio->flags & IOMAP_DIO_WRITE) { | |
1746 | loff_t size = inode->i_size; | |
1747 | ||
1748 | if (pos > size) | |
1749 | memset(iomap->inline_data + size, 0, pos - size); | |
1750 | copied = copy_from_iter(iomap->inline_data + pos, length, iter); | |
1751 | if (copied) { | |
1752 | if (pos + copied > size) | |
1753 | i_size_write(inode, pos + copied); | |
1754 | mark_inode_dirty(inode); | |
1755 | } | |
1756 | } else { | |
1757 | copied = copy_to_iter(iomap->inline_data + pos, length, iter); | |
1758 | } | |
1759 | dio->size += copied; | |
1760 | return copied; | |
1761 | } | |
1762 | ||
09230435 CH |
1763 | static loff_t |
1764 | iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, | |
1765 | void *data, struct iomap *iomap) | |
1766 | { | |
1767 | struct iomap_dio *dio = data; | |
1768 | ||
1769 | switch (iomap->type) { | |
1770 | case IOMAP_HOLE: | |
1771 | if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE)) | |
1772 | return -EIO; | |
1773 | return iomap_dio_hole_actor(length, dio); | |
1774 | case IOMAP_UNWRITTEN: | |
1775 | if (!(dio->flags & IOMAP_DIO_WRITE)) | |
1776 | return iomap_dio_hole_actor(length, dio); | |
1777 | return iomap_dio_bio_actor(inode, pos, length, dio, iomap); | |
1778 | case IOMAP_MAPPED: | |
1779 | return iomap_dio_bio_actor(inode, pos, length, dio, iomap); | |
ec181f67 AG |
1780 | case IOMAP_INLINE: |
1781 | return iomap_dio_inline_actor(inode, pos, length, dio, iomap); | |
09230435 CH |
1782 | default: |
1783 | WARN_ON_ONCE(1); | |
1784 | return -EIO; | |
1785 | } | |
1786 | } | |
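/*
 * Editor's note (illustrative, not part of the original source): with the
 * dispatch above, a direct read of a hole or an unwritten extent never
 * touches the bio path at all; iomap_dio_hole_actor() simply zero-fills
 * the user buffer with iov_iter_zero().  A direct write that maps to a
 * hole is treated as a filesystem bug and fails with -EIO, since
 * ->iomap_begin is expected to allocate blocks for IOMAP_WRITE requests.
 */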
1787 | ||
4f8ff44b DC |
1788 | /* |
1789 | * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO | |
3460cac1 DC |
1790 | * is being issued as AIO or not. This allows us to optimise pure data writes |
1791 | * to use REQ_FUA rather than requiring generic_write_sync() to issue a | |
1792 | * REQ_FLUSH post write. This is slightly tricky because a single request here | |
1793 | * can be mapped into multiple disjoint IOs and only a subset of the IOs issued | |
1794 | * may be pure data writes. In that case, we still need to do a full data sync | |
1795 | * completion. | |
4f8ff44b | 1796 | */ |
ff6a9292 | 1797 | ssize_t |
8ff6daa1 CH |
1798 | iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, |
1799 | const struct iomap_ops *ops, iomap_dio_end_io_t end_io) | |
ff6a9292 CH |
1800 | { |
1801 | struct address_space *mapping = iocb->ki_filp->f_mapping; | |
1802 | struct inode *inode = file_inode(iocb->ki_filp); | |
1803 | size_t count = iov_iter_count(iter); | |
c771c14b EG |
1804 | loff_t pos = iocb->ki_pos, start = pos; |
1805 | loff_t end = iocb->ki_pos + count - 1, ret = 0; | |
ff6a9292 CH |
1806 | unsigned int flags = IOMAP_DIRECT; |
1807 | struct blk_plug plug; | |
1808 | struct iomap_dio *dio; | |
1809 | ||
1810 | lockdep_assert_held(&inode->i_rwsem); | |
1811 | ||
1812 | if (!count) | |
1813 | return 0; | |
1814 | ||
1815 | dio = kmalloc(sizeof(*dio), GFP_KERNEL); | |
1816 | if (!dio) | |
1817 | return -ENOMEM; | |
1818 | ||
1819 | dio->iocb = iocb; | |
1820 | atomic_set(&dio->ref, 1); | |
1821 | dio->size = 0; | |
1822 | dio->i_size = i_size_read(inode); | |
1823 | dio->end_io = end_io; | |
1824 | dio->error = 0; | |
1825 | dio->flags = 0; | |
ebf00be3 | 1826 | dio->wait_for_completion = is_sync_kiocb(iocb); |
ff6a9292 CH |
1827 | |
1828 | dio->submit.iter = iter; | |
ebf00be3 AG |
1829 | dio->submit.waiter = current; |
1830 | dio->submit.cookie = BLK_QC_T_NONE; | |
1831 | dio->submit.last_queue = NULL; | |
ff6a9292 CH |
1832 | |
1833 | if (iov_iter_rw(iter) == READ) { | |
1834 | if (pos >= dio->i_size) | |
1835 | goto out_free_dio; | |
1836 | ||
00e23707 | 1837 | if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ) |
ff6a9292 CH |
1838 | dio->flags |= IOMAP_DIO_DIRTY; |
1839 | } else { | |
3460cac1 | 1840 | flags |= IOMAP_WRITE; |
ff6a9292 | 1841 | dio->flags |= IOMAP_DIO_WRITE; |
3460cac1 DC |
1842 | |
1843 | /* for data sync or sync, we need sync completion processing */ | |
4f8ff44b DC |
1844 | if (iocb->ki_flags & IOCB_DSYNC) |
1845 | dio->flags |= IOMAP_DIO_NEED_SYNC; | |
3460cac1 DC |
1846 | |
1847 | /* | |
1848 | * For datasync only writes, we optimistically try using FUA for | |
1849 | * this IO. Any non-FUA write that occurs will clear this flag, | |
1850 | * hence we know before completion whether a cache flush is | |
1851 | * necessary. | |
1852 | */ | |
1853 | if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC) | |
1854 | dio->flags |= IOMAP_DIO_WRITE_FUA; | |
ff6a9292 CH |
1855 | } |
1856 | ||
a38d1243 GR |
1857 | if (iocb->ki_flags & IOCB_NOWAIT) { |
1858 | if (filemap_range_has_page(mapping, start, end)) { | |
1859 | ret = -EAGAIN; | |
1860 | goto out_free_dio; | |
1861 | } | |
1862 | flags |= IOMAP_NOWAIT; | |
1863 | } | |
1864 | ||
55635ba7 AR |
1865 | ret = filemap_write_and_wait_range(mapping, start, end); |
1866 | if (ret) | |
1867 | goto out_free_dio; | |
ff6a9292 | 1868 | |
5a9d929d DW |
1869 | /* |
1870 | * Try to invalidate cache pages for the range we're direct | |
1871 | * writing. If this invalidation fails, tough, the write will | |
1872 | * still work, but racing two incompatible write paths is a | |
1873 | * pretty crazy thing to do, so we don't support it 100%. | |
1874 | */ | |
55635ba7 AR |
1875 | ret = invalidate_inode_pages2_range(mapping, |
1876 | start >> PAGE_SHIFT, end >> PAGE_SHIFT); | |
5a9d929d DW |
1877 | if (ret) |
1878 | dio_warn_stale_pagecache(iocb->ki_filp); | |
55635ba7 | 1879 | ret = 0; |
ff6a9292 | 1880 | |
ebf00be3 | 1881 | if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && |
546e7be8 CR |
1882 | !inode->i_sb->s_dio_done_wq) { |
1883 | ret = sb_init_dio_done_wq(inode->i_sb); | |
1884 | if (ret < 0) | |
1885 | goto out_free_dio; | |
1886 | } | |
1887 | ||
ff6a9292 CH |
1888 | inode_dio_begin(inode); |
1889 | ||
1890 | blk_start_plug(&plug); | |
1891 | do { | |
1892 | ret = iomap_apply(inode, pos, count, flags, ops, dio, | |
1893 | iomap_dio_actor); | |
1894 | if (ret <= 0) { | |
1895 | /* magic error code to fall back to buffered I/O */ | |
ebf00be3 AG |
1896 | if (ret == -ENOTBLK) { |
1897 | dio->wait_for_completion = true; | |
ff6a9292 | 1898 | ret = 0; |
ebf00be3 | 1899 | } |
ff6a9292 CH |
1900 | break; |
1901 | } | |
1902 | pos += ret; | |
a008c31c CR |
1903 | |
1904 | if (iov_iter_rw(iter) == READ && pos >= dio->i_size) | |
1905 | break; | |
ff6a9292 CH |
1906 | } while ((count = iov_iter_count(iter)) > 0); |
1907 | blk_finish_plug(&plug); | |
1908 | ||
1909 | if (ret < 0) | |
1910 | iomap_dio_set_error(dio, ret); | |
1911 | ||
3460cac1 DC |
1912 | /* |
1913 | * If all the writes we issued were FUA, we don't need to flush the | |
1914 | * cache on IO completion. Clear the sync flag for this case. | |
1915 | */ | |
1916 | if (dio->flags & IOMAP_DIO_WRITE_FUA) | |
1917 | dio->flags &= ~IOMAP_DIO_NEED_SYNC; | |
1918 | ||
ff6a9292 | 1919 | if (!atomic_dec_and_test(&dio->ref)) { |
ebf00be3 | 1920 | if (!dio->wait_for_completion) |
ff6a9292 CH |
1921 | return -EIOCBQUEUED; |
1922 | ||
1923 | for (;;) { | |
1ac5cd49 | 1924 | set_current_state(TASK_UNINTERRUPTIBLE); |
ff6a9292 CH |
1925 | if (!READ_ONCE(dio->submit.waiter)) |
1926 | break; | |
1927 | ||
1928 | if (!(iocb->ki_flags & IOCB_HIPRI) || | |
1929 | !dio->submit.last_queue || | |
ea435e1b | 1930 | !blk_poll(dio->submit.last_queue, |
0a1b8b87 | 1931 | dio->submit.cookie, true)) |
ff6a9292 CH |
1932 | io_schedule(); |
1933 | } | |
1934 | __set_current_state(TASK_RUNNING); | |
1935 | } | |
1936 | ||
c771c14b EG |
1937 | ret = iomap_dio_complete(dio); |
1938 | ||
c771c14b | 1939 | return ret; |
ff6a9292 CH |
1940 | |
1941 | out_free_dio: | |
1942 | kfree(dio); | |
1943 | return ret; | |
1944 | } | |
1945 | EXPORT_SYMBOL_GPL(iomap_dio_rw); | |
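/*
 * Editor's sketch (illustrative, not part of the original source): a
 * minimal ->read_iter() wrapper showing how a filesystem might hand
 * direct I/O off to iomap_dio_rw().  "example_iomap_ops" is a
 * hypothetical placeholder for the filesystem's own iomap_ops; real
 * callers also pass an end_io callback when they need post-I/O
 * processing such as unwritten extent conversion.
 */
extern const struct iomap_ops example_iomap_ops;	/* hypothetical */

static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;		/* nothing to transfer */

	/* iomap_dio_rw() asserts that inode->i_rwsem is held */
	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
	inode_unlock_shared(inode);

	return ret;
}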
67482129 DW |
1946 | |
1947 | /* Swapfile activation */ | |
1948 | ||
1949 | #ifdef CONFIG_SWAP | |
1950 | struct iomap_swapfile_info { | |
1951 | struct iomap iomap; /* accumulated iomap */ | |
1952 | struct swap_info_struct *sis; | |
1953 | uint64_t lowest_ppage; /* lowest physical addr seen (pages) */ | |
1954 | uint64_t highest_ppage; /* highest physical addr seen (pages) */ | |
1955 | unsigned long nr_pages; /* number of pages collected */ | |
1956 | int nr_extents; /* extent count */ | |
1957 | }; | |
1958 | ||
1959 | /* | |
1960 | * Collect physical extents for this swap file. Physical extents reported to | |
1961 | * the swap code must be trimmed to align to a page boundary. The logical | |
1962 | * offset within the file is irrelevant since the swapfile code maps logical | |
1963 | * page numbers of the swap device to the physical page-aligned extents. | |
1964 | */ | |
1965 | static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi) | |
1966 | { | |
1967 | struct iomap *iomap = &isi->iomap; | |
1968 | unsigned long nr_pages; | |
1969 | uint64_t first_ppage; | |
1970 | uint64_t first_ppage_reported; | |
1971 | uint64_t next_ppage; | |
1972 | int error; | |
1973 | ||
1974 | /* | |
1975 | * Round the start up and the end down so that the physical | |
1976 | * extent aligns to a page boundary. | |
1977 | */ | |
1978 | first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT; | |
1979 | next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >> | |
1980 | PAGE_SHIFT; | |
1981 | ||
1982 | /* Skip too-short physical extents. */ | |
1983 | if (first_ppage >= next_ppage) | |
1984 | return 0; | |
1985 | nr_pages = next_ppage - first_ppage; | |
1986 | ||
1987 | /* | |
1988 | * Calculate how much swap space we're adding; the first page contains | |
1989 | * the swap header and doesn't count. The mm still wants that first | |
1990 | * page fed to add_swap_extent, however. | |
1991 | */ | |
1992 | first_ppage_reported = first_ppage; | |
1993 | if (iomap->offset == 0) | |
1994 | first_ppage_reported++; | |
1995 | if (isi->lowest_ppage > first_ppage_reported) | |
1996 | isi->lowest_ppage = first_ppage_reported; | |
1997 | if (isi->highest_ppage < (next_ppage - 1)) | |
1998 | isi->highest_ppage = next_ppage - 1; | |
1999 | ||
2000 | /* Add extent, set up for the next call. */ | |
2001 | error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); | |
2002 | if (error < 0) | |
2003 | return error; | |
2004 | isi->nr_extents += error; | |
2005 | isi->nr_pages += nr_pages; | |
2006 | return 0; | |
2007 | } | |
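/*
 * Editor's note (illustrative, not part of the original source): a worked
 * example of the trimming above with 4096-byte pages.  A mapping with
 * iomap->addr == 0x11200 and iomap->length == 0x3000 yields
 * first_ppage = ALIGN(0x11200, 4096) >> 12 = 0x12 and
 * next_ppage = ALIGN_DOWN(0x14200, 4096) >> 12 = 0x14, so only the two
 * fully contained physical pages are handed to add_swap_extent(); the
 * misaligned head and tail of the extent are simply not used for swap.
 */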
2008 | ||
2009 | /* | |
2010 | * Accumulate iomaps for this swap file. We have to accumulate iomaps because | |
2011 | * swap only cares about contiguous page-aligned physical extents and makes no | |
2012 | * distinction between written and unwritten extents. | |
2013 | */ | |
2014 | static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos, | |
2015 | loff_t count, void *data, struct iomap *iomap) | |
2016 | { | |
2017 | struct iomap_swapfile_info *isi = data; | |
2018 | int error; | |
2019 | ||
19319b53 CH |
2020 | switch (iomap->type) { |
2021 | case IOMAP_MAPPED: | |
2022 | case IOMAP_UNWRITTEN: | |
2023 | /* Only real or unwritten extents. */ | |
2024 | break; | |
2025 | case IOMAP_INLINE: | |
2026 | /* No inline data. */ | |
ec601924 OS |
2027 | pr_err("swapon: file is inline\n"); |
2028 | return -EINVAL; | |
19319b53 | 2029 | default: |
ec601924 OS |
2030 | pr_err("swapon: file has unallocated extents\n"); |
2031 | return -EINVAL; | |
2032 | } | |
67482129 | 2033 | |
ec601924 OS |
2034 | /* No uncommitted metadata or shared blocks. */ |
2035 | if (iomap->flags & IOMAP_F_DIRTY) { | |
2036 | pr_err("swapon: file is not committed\n"); | |
2037 | return -EINVAL; | |
2038 | } | |
2039 | if (iomap->flags & IOMAP_F_SHARED) { | |
2040 | pr_err("swapon: file has shared extents\n"); | |
2041 | return -EINVAL; | |
2042 | } | |
67482129 | 2043 | |
ec601924 OS |
2044 | /* Only one bdev per swap file. */ |
2045 | if (iomap->bdev != isi->sis->bdev) { | |
2046 | pr_err("swapon: file is on multiple devices\n"); | |
2047 | return -EINVAL; | |
2048 | } | |
67482129 DW |
2049 | |
2050 | if (isi->iomap.length == 0) { | |
2051 | /* No accumulated extent, so just store it. */ | |
2052 | memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); | |
2053 | } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) { | |
2054 | /* Append this to the accumulated extent. */ | |
2055 | isi->iomap.length += iomap->length; | |
2056 | } else { | |
2057 | /* Otherwise, add the retained iomap and store this one. */ | |
2058 | error = iomap_swapfile_add_extent(isi); | |
2059 | if (error) | |
2060 | return error; | |
2061 | memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); | |
2062 | } | |
67482129 | 2063 | return count; |
67482129 DW |
2064 | } |
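/*
 * Editor's note (illustrative, not part of the original source): the
 * accumulation above merges physically contiguous mappings before they
 * are trimmed.  For example, a mapping at addr 0x10000 with length 0x8000
 * followed by one at addr 0x18000 with length 0x4000 is folded into a
 * single 0xc000-byte extent; iomap_swapfile_add_extent() only runs once a
 * discontiguity is hit, or at the end of the walk in
 * iomap_swapfile_activate().
 */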
2065 | ||
2066 | /* | |
2067 | * Iterate a swap file's iomaps to construct physical extents that can be | |
2068 | * passed to the swapfile subsystem. | |
2069 | */ | |
2070 | int iomap_swapfile_activate(struct swap_info_struct *sis, | |
2071 | struct file *swap_file, sector_t *pagespan, | |
2072 | const struct iomap_ops *ops) | |
2073 | { | |
2074 | struct iomap_swapfile_info isi = { | |
2075 | .sis = sis, | |
2076 | .lowest_ppage = (sector_t)-1ULL, | |
2077 | }; | |
2078 | struct address_space *mapping = swap_file->f_mapping; | |
2079 | struct inode *inode = mapping->host; | |
2080 | loff_t pos = 0; | |
2081 | loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE); | |
2082 | loff_t ret; | |
2083 | ||
117a148f DW |
2084 | /* |
2085 | * Persist all file mapping metadata so that we won't have any | |
2086 | * IOMAP_F_DIRTY iomaps. | |
2087 | */ | |
2088 | ret = vfs_fsync(swap_file, 1); | |
67482129 DW |
2089 | if (ret) |
2090 | return ret; | |
2091 | ||
2092 | while (len > 0) { | |
2093 | ret = iomap_apply(inode, pos, len, IOMAP_REPORT, | |
2094 | ops, &isi, iomap_swapfile_activate_actor); | |
2095 | if (ret <= 0) | |
2096 | return ret; | |
2097 | ||
2098 | pos += ret; | |
2099 | len -= ret; | |
2100 | } | |
2101 | ||
2102 | if (isi.iomap.length) { | |
2103 | ret = iomap_swapfile_add_extent(&isi); | |
2104 | if (ret) | |
2105 | return ret; | |
2106 | } | |
2107 | ||
2108 | *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; | |
2109 | sis->max = isi.nr_pages; | |
2110 | sis->pages = isi.nr_pages - 1; | |
2111 | sis->highest_bit = isi.nr_pages - 1; | |
2112 | return isi.nr_extents; | |
2113 | } | |
2114 | EXPORT_SYMBOL_GPL(iomap_swapfile_activate); | |
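/*
 * Editor's sketch (illustrative, not part of the original source): a
 * filesystem's ->swap_activate() address_space operation can delegate
 * directly to iomap_swapfile_activate().  "example_iomap_ops" is a
 * hypothetical placeholder; whatever iomap_begin it provides must honour
 * IOMAP_REPORT and must not report holes, inline data, or shared/dirty
 * extents, as the checks in the actor above enforce.
 */
static int example_swap_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *span)
{
	return iomap_swapfile_activate(sis, swap_file, span,
			&example_iomap_ops);
}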
2115 | #endif /* CONFIG_SWAP */ | |
89eb1906 CH |
2116 | |
2117 | static loff_t | |
2118 | iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length, | |
2119 | void *data, struct iomap *iomap) | |
2120 | { | |
2121 | sector_t *bno = data, addr; | |
2122 | ||
2123 | if (iomap->type == IOMAP_MAPPED) { | |
2124 | addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits; | |
2125 | if (addr > INT_MAX) | |
2126 | WARN(1, "would truncate bmap result\n"); | |
2127 | else | |
2128 | *bno = addr; | |
2129 | } | |
2130 | return 0; | |
2131 | } | |
2132 | ||
2133 | /* legacy ->bmap interface. 0 is the error return (!) */ | |
2134 | sector_t | |
2135 | iomap_bmap(struct address_space *mapping, sector_t bno, | |
2136 | const struct iomap_ops *ops) | |
2137 | { | |
2138 | struct inode *inode = mapping->host; | |
79b3dbe4 | 2139 | loff_t pos = bno << inode->i_blkbits; |
89eb1906 CH |
2140 | unsigned blocksize = i_blocksize(inode); |
2141 | ||
2142 | if (filemap_write_and_wait(mapping)) | |
2143 | return 0; | |
2144 | ||
2145 | bno = 0; | |
2146 | iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor); | |
2147 | return bno; | |
2148 | } | |
2149 | EXPORT_SYMBOL_GPL(iomap_bmap); |
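/*
 * Editor's sketch (illustrative, not part of the original source): wiring
 * iomap_bmap() into a filesystem's ->bmap() address_space operation.
 * "example_iomap_ops" is again a hypothetical placeholder; note that 0 is
 * the overloaded "error or no block" return of this legacy interface, so
 * callers cannot distinguish a hole from a failure.
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &example_iomap_ops);
}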