// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

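/*
 * Slab caches for the io_end and io_end_vec structures used to track
 * writeback completion; created once at module load.
 */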
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

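/*
 * Allocate a new io_end_vec and queue it on the io_end's list. The caller
 * records in it the range covered by the IO, to be converted once the IO
 * completes.
 */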
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c. This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

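/*
 * Complete writeback for every folio attached to a finished bio: clear the
 * async_write flag on each buffer the bio covered and, once no buffer in a
 * folio remains under IO, free any fscrypt bounce folio and end writeback
 * on the pagecache folio.
 */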
static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

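/*
 * Final teardown of an io_end once its refcount hits zero: finish the bios
 * chained through bi_private, free the extent vectors, and free the io_end
 * itself.
 */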
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * On successful IO, check a range of space and convert unwritten extents to
 * written. On IO failure, check if journal abort is needed. Note that
 * we are protected from truncate touching same part of extent tree by the
 * fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	struct super_block *sb = inode->i_sb;
	int ret = 0;

	ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	/*
	 * Do not convert the unwritten extents if data writeback fails,
	 * or stale data may be exposed.
	 */
	io_end->handle = NULL;	/* Following call will use up the handle */
	if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) {
		ret = -EIO;
		if (handle)
			jbd2_journal_free_reserved(handle);

		if (test_opt(sb, DATA_ERR_ABORT))
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, ret);
	} else {
		ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	}
	if (ret < 0 && !ext4_emergency_state(sb) &&
	    io_end->flag & EXT4_IO_END_UNWRITTEN) {
		ext4_msg(sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}

	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

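/*
 * Does this io_end need to finish in process context? True when unwritten
 * extents must be converted, or when an IO error is pending and the
 * data_err=abort mount option requires aborting the journal.
 */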
static bool ext4_io_end_defer_completion(ext4_io_end_t *io_end)
{
	if (io_end->flag & EXT4_IO_END_UNWRITTEN)
		return true;
	if (test_opt(io_end->inode->i_sb, DATA_ERR_ABORT) &&
	    io_end->flag & EXT4_IO_END_FAILED)
		return true;
	return false;
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions or pending IO errors will enter here. */
	WARN_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
	WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN &&
		!io_end->handle && sbi->s_journal);

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

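/*
 * Splice the per-inode completed list off under the lock, then process each
 * io_end without it. The first error encountered is returned.
 */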
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Used to convert unwritten extents to written extents upon IO completion,
 * or used to abort the journal upon IO errors.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

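/*
 * Allocate an io_end with an initial reference. Additional references are
 * taken with ext4_get_io_end() (one per bio, stored in bi_private) and
 * dropped with ext4_put_io_end{,_defer}() once submission and completion
 * are done.
 */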
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

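/*
 * Drop a reference from a context that must not do the completion work
 * itself (e.g. bio end_io). If this was the last reference and conversion
 * or error handling is still pending, hand the io_end to the per-inode
 * workqueue; otherwise release it directly.
 */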
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_FAILED ||
		    (io_end->flag & EXT4_IO_END_UNWRITTEN &&
		     !list_empty(&io_end->list_vec))) {
			ext4_add_complete_io(io_end);
			return;
		}
		ext4_release_io_end(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (ext4_io_end_defer_completion(io_end))
			return ext4_end_io_end(io_end);

		ext4_release_io_end(io_end);
	}
	return 0;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		io_end->flag |= EXT4_IO_END_FAILED;
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (ext4_io_end_defer_completion(io_end)) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

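/* Prepare a submission context for one writeback pass. */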
void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

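/*
 * Start a new bio at bh's block: set up the crypt context, the completion
 * handler, and an io_end reference that ext4_end_bio() will drop.
 */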
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

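/*
 * Add one buffer to the bio under construction. If the buffer is not
 * contiguous with the current bio, cannot share its crypt context, or the
 * bio is already full, submit what we have and retry with a fresh bio.
 */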
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		io_submit_init_bio(io, bh);
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);
	io->io_next_block++;
}

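/*
 * Write out the dirty buffers of one folio. Callers accumulate bios across
 * folios; a rough sketch of the expected calling pattern (the actual driver
 * lives in ext4's writepages code):
 *
 *	ext4_io_submit_init(&io, wbc);
 *	io.io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	ext4_bio_write_folio(&io, folio, len);	// once per folio
 *	ext4_io_submit(&io);
 *	ext4_put_io_end(io.io_end);
 *
 * On entry the folio is locked and not yet under writeback; len is roughly
 * the number of bytes of the folio that lie within i_size.
 */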
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	/*
	 * Comments copied from block_write_full_folio:
	 *
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping dirty some buffer we cannot write? Make sure
			 * to redirty the folio and keep TOWRITE tag so that
			 * racing WB_SYNC_ALL writeback does not skip the folio.
			 * This happens e.g. when doing writeout for
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	/* Nothing to submit? Just unlock the folio... */
	if (!nr_to_submit)
		return 0;

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
					enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}
bd2d0210 | 593 | } |