// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

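/*
 * Set up the slab caches backing io_end and io_end_vec allocations.
 * Both caches are destroyed again by ext4_exit_pageio().
 */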
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

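/*
 * Allocate a new io_end_vec and append it to the io_end's list of
 * extent ranges awaiting unwritten extent conversion. Returns
 * ERR_PTR(-ENOMEM) if the allocation fails.
 */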
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

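/*
 * Return the most recently allocated io_end_vec on the io_end's list.
 * The list must not be empty.
 */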
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c. This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
		       bh->b_bdev,
		       (unsigned long long)bh->b_blocknr);
}

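/*
 * Complete a write bio: for each folio covered by the bio, clear the
 * async_write flag on the buffers the bio wrote and record any I/O
 * error. Once no buffer in the folio remains under async write, free
 * the fscrypt bounce folio (if any) and end writeback on the folio.
 */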
static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			folio_set_error(folio);
			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

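/*
 * Release an io_end: finish and free any bios still chained off it,
 * free its io_end_vec list, and return it to the slab cache. The
 * io_end must be off all completion lists and must have no pending
 * unwritten extent conversion.
 */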
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

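/*
 * Detach the list of completed io_ends from the inode under
 * i_completed_io_lock, then convert each entry's unwritten extents with
 * the lock dropped. The first error seen is returned, but the whole
 * list is always processed.
 */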
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

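/*
 * Allocate and initialize an io_end structure with a single reference
 * held by the caller. Returns NULL if the allocation fails.
 */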
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

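/*
 * Drop a reference to the io_end. When the last reference goes away,
 * the io_end is released immediately unless it still has unwritten
 * extent ranges to convert, in which case it is queued to the per-inode
 * completed-IO list for conversion in the reserved workqueue.
 */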
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
				list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

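/*
 * Synchronous counterpart of ext4_put_io_end_defer(): if the last
 * reference is dropped while unwritten extents remain, perform the
 * conversion here rather than deferring it to the workqueue.
 */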
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (refcount_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

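/*
 * Submit the bio currently being assembled, if any. WB_SYNC_ALL
 * writeback marks the bio REQ_SYNC so the block layer treats it as
 * synchronous.
 */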
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

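/*
 * Add one buffer to the bio under construction. If the buffer is not
 * physically contiguous with the blocks already in the bio, cannot
 * share its encryption context, or the bio is full, the current bio is
 * submitted first and a fresh one is started.
 */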
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL)
		io_submit_init_bio(io, bh);
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
	io->io_next_block++;
}

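/*
 * Write out up to @len bytes of @folio: mark every dirty, mapped buffer
 * in that range async_write, encrypt the data into a bounce page when
 * fs-layer crypto is in use, start writeback, and add the marked
 * buffers to the bio being assembled in @io. Buffers that cannot be
 * written yet (delayed, unwritten, or journalled-dirty) keep the folio
 * dirty and tagged for writeback instead. Returns 0, or a negative
 * errno if a bounce page cannot be allocated.
 */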
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	folio_clear_error(folio);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping a buffer we cannot write dirty? Make sure
			 * to redirty the folio and keep the TOWRITE tag so
			 * that racing WB_SYNC_ALL writeback does not skip the
			 * folio. This happens e.g. when doing writeout for a
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	/* Nothing to submit? Just unlock the folio... */
	if (!nr_to_submit)
		return 0;

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
					enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}