// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *	Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			      struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

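/*
 * Illustrative note (not upstream code): the 32-bit checksum is split
 * across two 16-bit on-disk fields.  For example, if csum == 0x89abcdef
 * and the inode is large enough to carry i_checksum_hi, the split stores
 *
 *	raw->i_checksum_lo = cpu_to_le16(0xcdef);
 *	raw->i_checksum_hi = cpu_to_le16(0x89ab);
 *
 * On an old 128-byte inode only i_checksum_lo exists, which is why the
 * verifier above masks the computed value with 0xFFFF before comparing.
 */
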
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. And for inodes with dioread_nolock, unwritten
	 * extents converting worker could merge extents and also have dirtied
	 * the inode. Flush worker is ignoring it because of I_FREEING flag but
	 * we still need to remove the inode from the writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list))
		inode_io_list_del(inode);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	/*
	 * Warn if something else accidentally dirtied the evicting inode,
	 * which could cause inode use-after-free issues later.
	 */
	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&ei->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode, 0);
}

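/*
 * Worked example (illustrative only, assuming a bigalloc file system
 * with a cluster ratio of 16): when "used" == 2 reserved clusters are
 * claimed above, i_reserved_data_blocks and s_dirtyclusters_counter
 * both drop by 2, while the quota calls operate on
 * EXT4_C2B(sbi, 2) == 32 file-system blocks, since quota is accounted
 * in blocks rather than clusters.
 */
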
static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same,
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so by that time the unwritten extent
	 * could have been converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the m_len values might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.  If
 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten.  If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill in map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* First, look up the extent status tree */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}

		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
			return retval;
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}
	/*
	 * In the no-wait cached-query mode, there is nothing more we can
	 * do if we cannot find the extent in the cache.
	 */
	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
		return 0;

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated
	 * ext4_ext_get_block() returns the create = 0
	 * with buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
				map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
					map->m_lblk + map->m_len - 1);
	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}

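/*
 * Usage sketch (illustrative only): a typical read-side caller fills in
 * m_lblk/m_len and inspects the returned flags, e.g.
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		;	 map.m_pblk now holds the physical block
 *
 * _ext4_get_block() below is a real caller following this pattern.
 */
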
/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	old_state = READ_ONCE(bh->b_state);
	do {
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

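/*
 * The loop above is the usual lock-free read-modify-write idiom: on
 * failure, try_cmpxchg() refreshes old_state with the current value, so
 * a minimal sketch of the pattern (illustrative only) looks like
 *
 *	old = READ_ONCE(*p);
 *	do {
 *		new = compute(old);
 *	} while (!try_cmpxchg(p, &old, new));
 */
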
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for a buffered write, if we
 * require creating an unwritten extent when blocks haven't been allocated.
 * The extent will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);
	ASSERT(create == 0 || !nowait);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	if (nowait)
		return sb_find_get_block(inode->i_sb, map.m_pblk);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

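/*
 * Usage sketch (illustrative only): ext4_bread() returns NULL for a
 * hole when no block is to be created, so callers must check both the
 * error and the NULL cases:
 *
 *	bh = ext4_bread(handle, inode, block, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	if (!bh)
 *		;	 hole: nothing mapped at this logical block
 */
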
/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

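/*
 * Usage sketch (illustrative only): walk the buffers of a locked folio
 * in the byte range being written, applying a journalling callback:
 *
 *	ret = ext4_walk_page_buffers(handle, inode, folio_buffers(folio),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 *
 * ext4_write_begin() below uses exactly this pattern for data=journal
 * mode.
 */
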
/*
 * Helper for handling dirtying of journalled data. We also mark the folio
 * as dirty so that the writeback code knows this folio (and inode) contains
 * dirty data. ext4_writepages() then commits the appropriate transaction to
 * make the data stable.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
	folio_mark_dirty(bh->b_folio);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
					    EXT4_JTR_NONE);
	if (!ret && dirty)
		ret = ext4_dirty_journalled_data(handle, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_buffers(folio);
	if (!head) {
		create_empty_buffers(&folio->page, blocksize, 0);
		head = folio_buffers(folio);
	}
	bbits = ilog2(blocksize);
	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio, to,
							    block_end,
							    block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		page_zero_new_buffers(&folio->page, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(folio,
						blocksize, bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end().  So doing the jbd2_journal_start at the start of
 * ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct folio *folio;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * __filemap_get_folio() can take a long time if the
	 * system is thrashing due to memory pressure, or if the folio
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the folio (if needed) without using GFP_NOFS.
	 */
retry_grab:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
					mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/*
	 * The same as page allocation, we prealloc buffer heads before
	 * starting the handle.
	 */
	if (!folio_buffers(folio))
		create_empty_buffers(&folio->page, inode->i_sb->s_blocksize, 0);

	folio_unlock(folio);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		folio_put(folio);
		return PTR_ERR(handle);
	}

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* The folio got truncated from under us */
		folio_unlock(folio);
		folio_put(folio);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the folio was unlocked */
	folio_wait_stable(folio);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(folio, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(&folio->page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio), from, to,
					     NULL, do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		folio_unlock(folio);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_rwsem.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		folio_put(folio);
		return ret;
	}
	*pagep = &folio->page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_dirty_journalled_data(handle, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);

	if (ext4_has_inline_data(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return ext4_write_inline_data_end(inode, pos, len, copied, page);

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	/*
	 * it's important to update i_size while still holding folio lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
	 * makes the holding time of folio lock longer. Second, it forces lock
	 * ordering of folio lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ret = ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we
		 * will have blocks allocated outside inode->i_size,
		 * so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					    struct inode *inode,
					    struct folio *folio,
					    unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = folio_buffers(folio);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					folio_zero_range(folio, start, size);
					write_end_fn(handle, inode, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		return ext4_write_inline_data_end(inode, pos, len, copied, page);

	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, inode, folio,
						 from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio),
					     from, from + copied, &partial,
					     write_end_fn);
		if (!partial)
			folio_mark_uptodate(folio);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we
		 * will have blocks allocated outside inode->i_size,
		 * so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;       /* success */
}

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	/* These are input fields for ext4_do_writepages() */
	struct inode *inode;
	struct writeback_control *wbc;
	unsigned int can_map:1;	/* Can writepages call map blocks? */

	/* These are internal state of ext4_do_writepages() */
	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
	unsigned int scanned_until_end:1;
	unsigned int journalled_more_data:1;
};

4e7ea81d
JK
1546static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1547 bool invalidate)
c4a0c46e 1548{
fb5a5be0 1549 unsigned nr, i;
c4a0c46e 1550 pgoff_t index, end;
fb5a5be0 1551 struct folio_batch fbatch;
c4a0c46e
AK
1552 struct inode *inode = mpd->inode;
1553 struct address_space *mapping = inode->i_mapping;
4e7ea81d
JK
1554
1555 /* This is necessary when next_page == 0. */
1556 if (mpd->first_page >= mpd->next_page)
1557 return;
c4a0c46e 1558
6b8ed620 1559 mpd->scanned_until_end = 0;
c7f5938a
CW
1560 index = mpd->first_page;
1561 end = mpd->next_page - 1;
4e7ea81d
JK
1562 if (invalidate) {
1563 ext4_lblk_t start, last;
09cbfeaf
KS
1564 start = index << (PAGE_SHIFT - inode->i_blkbits);
1565 last = end << (PAGE_SHIFT - inode->i_blkbits);
7f0d8e1d
EW
1566
1567 /*
1568 * avoid racing with extent status tree scans made by
1569 * ext4_insert_delayed_block()
1570 */
1571 down_write(&EXT4_I(inode)->i_data_sem);
4e7ea81d 1572 ext4_es_remove_extent(inode, start, last - start + 1);
7f0d8e1d 1573 up_write(&EXT4_I(inode)->i_data_sem);
4e7ea81d 1574 }
51865fda 1575
fb5a5be0 1576 folio_batch_init(&fbatch);
c4a0c46e 1577 while (index <= end) {
fb5a5be0
MWO
1578 nr = filemap_get_folios(mapping, &index, end, &fbatch);
1579 if (nr == 0)
c4a0c46e 1580 break;
fb5a5be0
MWO
1581 for (i = 0; i < nr; i++) {
1582 struct folio *folio = fbatch.folios[i];
2b85a617 1583
fb5a5be0
MWO
1584 if (folio->index < mpd->first_page)
1585 continue;
1586 if (folio->index + folio_nr_pages(folio) - 1 > end)
1587 continue;
7ba13abb
MWO
1588 BUG_ON(!folio_test_locked(folio));
1589 BUG_ON(folio_test_writeback(folio));
4e7ea81d 1590 if (invalidate) {
7ba13abb
MWO
1591 if (folio_mapped(folio))
1592 folio_clear_dirty_for_io(folio);
1593 block_invalidate_folio(folio, 0,
1594 folio_size(folio));
1595 folio_clear_uptodate(folio);
4e7ea81d 1596 }
7ba13abb 1597 folio_unlock(folio);
c4a0c46e 1598 }
fb5a5be0 1599 folio_batch_release(&fbatch);
c4a0c46e 1600 }
c4a0c46e
AK
1601}
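/*
 * Worked example of the index-to-block conversion above (assuming 4KiB
 * pages and 1KiB blocks, so PAGE_SHIFT - i_blkbits = 2): page index 5
 * gives start = 5 << 2 = 20, and for end = 8, last = 32, so extent
 * status entries for logical blocks 20..32 are removed.
 */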
1602
df22291f
AK
1603static void ext4_print_free_blocks(struct inode *inode)
1604{
1605 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
92b97816 1606 struct super_block *sb = inode->i_sb;
f78ee70d 1607 struct ext4_inode_info *ei = EXT4_I(inode);
92b97816
TT
1608
1609 ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
5dee5437 1610 EXT4_C2B(EXT4_SB(inode->i_sb),
f78ee70d 1611 ext4_count_free_clusters(sb)));
92b97816
TT
1612 ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1613 ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
f78ee70d 1614 (long long) EXT4_C2B(EXT4_SB(sb),
57042651 1615 percpu_counter_sum(&sbi->s_freeclusters_counter)));
92b97816 1616 ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
f78ee70d 1617 (long long) EXT4_C2B(EXT4_SB(sb),
7b415bf6 1618 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
92b97816
TT
1619 ext4_msg(sb, KERN_CRIT, "Block reservation details");
1620 ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
f78ee70d 1621 ei->i_reserved_data_blocks);
df22291f
AK
1622 return;
1623}
1624
0b02f4c0
EW
1625/*
1626 * ext4_insert_delayed_block - adds a delayed block to the extents status
1627 * tree, incrementing the reserved cluster/block
1628 * count or making a pending reservation
1629 * where needed
1630 *
1631 * @inode - file containing the newly added block
1632 * @lblk - logical block to be added
1633 *
1634 * Returns 0 on success, negative error code on failure.
1635 */
1636static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1637{
1638 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1639 int ret;
1640 bool allocated = false;
6fed8395 1641 bool reserved = false;
0b02f4c0
EW
1642
1643 /*
1644 * If the cluster containing lblk is shared with a delayed,
1645 * written, or unwritten extent in a bigalloc file system, it's
1646 * already been accounted for and does not need to be reserved.
1647 * A pending reservation must be made for the cluster if it's
1648 * shared with a written or unwritten extent and doesn't already
1649 * have one. Written and unwritten extents can be purged from the
1650 * extents status tree if the system is under memory pressure, so
1651 * it's necessary to examine the extent tree if a search of the
1652 * extents status tree doesn't get a match.
1653 */
1654 if (sbi->s_cluster_ratio == 1) {
1655 ret = ext4_da_reserve_space(inode);
1656 if (ret != 0) /* ENOSPC */
1657 goto errout;
6fed8395 1658 reserved = true;
0b02f4c0
EW
1659 } else { /* bigalloc */
1660 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1661 if (!ext4_es_scan_clu(inode,
1662 &ext4_es_is_mapped, lblk)) {
1663 ret = ext4_clu_mapped(inode,
1664 EXT4_B2C(sbi, lblk));
1665 if (ret < 0)
1666 goto errout;
1667 if (ret == 0) {
1668 ret = ext4_da_reserve_space(inode);
1669 if (ret != 0) /* ENOSPC */
1670 goto errout;
6fed8395 1671 reserved = true;
0b02f4c0
EW
1672 } else {
1673 allocated = true;
1674 }
1675 } else {
1676 allocated = true;
1677 }
1678 }
1679 }
1680
1681 ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
6fed8395
JX
1682 if (ret && reserved)
1683 ext4_da_release_space(inode, 1);
0b02f4c0
EW
1684
1685errout:
1686 return ret;
1687}
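/*
 * Summary of the decisions above, derived from the code (bigalloc
 * meaning s_cluster_ratio > 1):
 *
 *	state of the cluster containing lblk		outcome
 *	--------------------------------------------	-------------------
 *	non-bigalloc filesystem				reserve one block
 *	delayed extent already in the cluster		nothing to account
 *	cluster mapped in the extent status tree	allocated = true
 *	cluster mapped on disk (ext4_clu_mapped())	allocated = true
 *	cluster unmapped everywhere			reserve one cluster
 */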
1688
5356f261
AK
1689/*
 1690 * This function grabs code from the very beginning of
1691 * ext4_map_blocks, but assumes that the caller is from delayed write
1692 * time. This function looks up the requested blocks and sets the
1693 * buffer delay bit under the protection of i_data_sem.
1694 */
1695static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1696 struct ext4_map_blocks *map,
1697 struct buffer_head *bh)
1698{
d100eef2 1699 struct extent_status es;
5356f261
AK
1700 int retval;
1701 sector_t invalid_block = ~((sector_t) 0xffff);
921f266b
DM
1702#ifdef ES_AGGRESSIVE_TEST
1703 struct ext4_map_blocks orig_map;
1704
1705 memcpy(&orig_map, map, sizeof(*map));
1706#endif
5356f261
AK
1707
1708 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1709 invalid_block = ~0;
1710
1711 map->m_flags = 0;
70aa1554 1712 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
5356f261 1713 (unsigned long) map->m_lblk);
d100eef2
ZL
1714
1715 /* Lookup extent status tree firstly */
bb5835ed 1716 if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
d100eef2
ZL
1717 if (ext4_es_is_hole(&es)) {
1718 retval = 0;
c8b459f4 1719 down_read(&EXT4_I(inode)->i_data_sem);
d100eef2
ZL
1720 goto add_delayed;
1721 }
1722
1723 /*
3eda41df
EW
1724 * Delayed extent could be allocated by fallocate.
1725 * So we need to check it.
d100eef2 1726 */
3eda41df
EW
1727 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1728 map_bh(bh, inode->i_sb, invalid_block);
1729 set_buffer_new(bh);
1730 set_buffer_delay(bh);
d100eef2
ZL
1731 return 0;
1732 }
1733
1734 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1735 retval = es.es_len - (iblock - es.es_lblk);
1736 if (retval > map->m_len)
1737 retval = map->m_len;
1738 map->m_len = retval;
1739 if (ext4_es_is_written(&es))
1740 map->m_flags |= EXT4_MAP_MAPPED;
1741 else if (ext4_es_is_unwritten(&es))
1742 map->m_flags |= EXT4_MAP_UNWRITTEN;
1743 else
1e83bc81 1744 BUG();
d100eef2 1745
921f266b
DM
1746#ifdef ES_AGGRESSIVE_TEST
1747 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1748#endif
d100eef2
ZL
1749 return retval;
1750 }
1751
5356f261
AK
1752 /*
1753 * Try to see if we can get the block without requesting a new
1754 * file system block.
1755 */
c8b459f4 1756 down_read(&EXT4_I(inode)->i_data_sem);
cbd7584e 1757 if (ext4_has_inline_data(inode))
9c3569b5 1758 retval = 0;
cbd7584e 1759 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
2f8e0a7c 1760 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
5356f261 1761 else
2f8e0a7c 1762 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
5356f261 1763
d100eef2 1764add_delayed:
5356f261 1765 if (retval == 0) {
f7fec032 1766 int ret;
ad431025 1767
5356f261
AK
1768 /*
1769 * XXX: __block_prepare_write() unmaps passed block,
1770 * is it OK?
1771 */
5356f261 1772
0b02f4c0
EW
1773 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1774 if (ret != 0) {
f7fec032 1775 retval = ret;
51865fda 1776 goto out_unlock;
f7fec032 1777 }
51865fda 1778
5356f261
AK
1779 map_bh(bh, inode->i_sb, invalid_block);
1780 set_buffer_new(bh);
1781 set_buffer_delay(bh);
f7fec032
ZL
1782 } else if (retval > 0) {
1783 int ret;
3be78c73 1784 unsigned int status;
f7fec032 1785
44fb851d
ZL
1786 if (unlikely(retval != map->m_len)) {
1787 ext4_warning(inode->i_sb,
1788 "ES len assertion failed for inode "
1789 "%lu: retval %d != map->m_len %d",
1790 inode->i_ino, retval, map->m_len);
1791 WARN_ON(1);
921f266b 1792 }
921f266b 1793
f7fec032
ZL
1794 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1795 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1796 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1797 map->m_pblk, status);
1798 if (ret != 0)
1799 retval = ret;
5356f261
AK
1800 }
1801
1802out_unlock:
1803 up_read((&EXT4_I(inode)->i_data_sem));
1804
1805 return retval;
1806}
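/*
 * The three success outcomes of the lookup above, as seen by
 * ext4_da_get_block_prep() (negative returns are error paths):
 *
 *	retval == 0, bh marked new + delay	block is now delayed
 *	retval > 0, EXT4_MAP_MAPPED		block is already written
 *	retval > 0, EXT4_MAP_UNWRITTEN		block is preallocated
 */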
1807
64769240 1808/*
d91bd2c1 1809 * This is a special get_block_t callback which is used by
b920c755
TT
1810 * ext4_da_write_begin(). It will either return mapped block or
1811 * reserve space for a single block.
29fa89d0
AK
1812 *
1813 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1814 * We also have b_blocknr = -1 and b_bdev initialized properly
1815 *
1816 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1817 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1818 * initialized properly.
64769240 1819 */
9c3569b5
TM
1820int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1821 struct buffer_head *bh, int create)
64769240 1822{
2ed88685 1823 struct ext4_map_blocks map;
64769240
AT
1824 int ret = 0;
1825
1826 BUG_ON(create == 0);
2ed88685
TT
1827 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1828
1829 map.m_lblk = iblock;
1830 map.m_len = 1;
64769240
AT
1831
1832 /*
 1833 * First, we need to know whether the block is already allocated;
 1834 * preallocated blocks are unmapped but should be treated
 1835 * the same as allocated blocks.
1836 */
5356f261
AK
1837 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1838 if (ret <= 0)
2ed88685 1839 return ret;
64769240 1840
2ed88685 1841 map_bh(bh, inode->i_sb, map.m_pblk);
ed8ad838 1842 ext4_update_bh_state(bh, map.m_flags);
2ed88685
TT
1843
1844 if (buffer_unwritten(bh)) {
1845 /* A delayed write to unwritten bh should be marked
1846 * new and mapped. Mapped ensures that we don't do
1847 * get_block multiple times when we write to the same
1848 * offset and new ensures that we do proper zero out
1849 * for partial write.
1850 */
1851 set_buffer_new(bh);
c8205636 1852 set_buffer_mapped(bh);
2ed88685
TT
1853 }
1854 return 0;
64769240 1855}
61628a3f 1856
33483b3b 1857static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
eaf2ca10 1858{
33483b3b
MW
1859 mpd->first_page += folio_nr_pages(folio);
1860 folio_unlock(folio);
eaf2ca10
JK
1861}
1862
81a0d3e1 1863static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
5f1132b2 1864{
81a0d3e1 1865 size_t len;
a056bdaa 1866 loff_t size;
5f1132b2
JK
1867 int err;
1868
81a0d3e1
MW
1869 BUG_ON(folio->index != mpd->first_page);
1870 folio_clear_dirty_for_io(folio);
a056bdaa
JK
1871 /*
1872 * We have to be very careful here! Nothing protects writeback path
1873 * against i_size changes and the page can be writeably mapped into
1874 * page tables. So an application can be growing i_size and writing
81a0d3e1 1875 * data through mmap while writeback runs. folio_clear_dirty_for_io()
a056bdaa 1876 * write-protects our page in page tables and the page cannot get
81a0d3e1
MW
1877 * written to again until we release folio lock. So only after
1878 * folio_clear_dirty_for_io() we are safe to sample i_size for
e8d6062c
MW
1879 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
1880 * on the barrier provided by folio_test_clear_dirty() in
81a0d3e1 1881 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
a056bdaa
JK
1882 * after page tables are updated.
1883 */
1884 size = i_size_read(mpd->inode);
81a0d3e1
MW
1885 len = folio_size(folio);
1886 if (folio_pos(folio) + len > size &&
c93d8f88 1887 !ext4_verity_in_progress(mpd->inode))
09cbfeaf 1888 len = size & ~PAGE_MASK;
e8d6062c 1889 err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
5f1132b2
JK
1890 if (!err)
1891 mpd->wbc->nr_to_write--;
5f1132b2
JK
1892
1893 return err;
1894}
1895
6db07461 1896#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
4e7ea81d 1897
61628a3f 1898/*
fffb2739
JK
1899 * mballoc gives us at most this number of blocks...
1900 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
70261f56 1901 * The rest of mballoc seems to handle chunks up to full group size.
61628a3f 1902 */
fffb2739 1903#define MAX_WRITEPAGES_EXTENT_LEN 2048
525f4ed8 1904
4e7ea81d
JK
1905/*
1906 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1907 *
1908 * @mpd - extent of blocks
1909 * @lblk - logical number of the block in the file
09930042 1910 * @bh - buffer head we want to add to the extent
4e7ea81d 1911 *
09930042
JK
 1912 * The function is used to collect contiguous blocks in the same state. If the
1913 * buffer doesn't require mapping for writeback and we haven't started the
1914 * extent of buffers to map yet, the function returns 'true' immediately - the
1915 * caller can write the buffer right away. Otherwise the function returns true
1916 * if the block has been added to the extent, false if the block couldn't be
1917 * added.
4e7ea81d 1918 */
09930042
JK
1919static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1920 struct buffer_head *bh)
4e7ea81d
JK
1921{
1922 struct ext4_map_blocks *map = &mpd->map;
1923
09930042
JK
1924 /* Buffer that doesn't need mapping for writeback? */
1925 if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1926 (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1927 /* So far no extent to map => we write the buffer right away */
1928 if (map->m_len == 0)
1929 return true;
1930 return false;
1931 }
4e7ea81d
JK
1932
1933 /* First block in the extent? */
1934 if (map->m_len == 0) {
dddbd6ac
JK
1935 /* We cannot map unless handle is started... */
1936 if (!mpd->do_map)
1937 return false;
4e7ea81d
JK
1938 map->m_lblk = lblk;
1939 map->m_len = 1;
09930042
JK
1940 map->m_flags = bh->b_state & BH_FLAGS;
1941 return true;
4e7ea81d
JK
1942 }
1943
09930042
JK
1944 /* Don't go larger than mballoc is willing to allocate */
1945 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1946 return false;
1947
4e7ea81d
JK
1948 /* Can we merge the block to our big extent? */
1949 if (lblk == map->m_lblk + map->m_len &&
09930042 1950 (bh->b_state & BH_FLAGS) == map->m_flags) {
4e7ea81d 1951 map->m_len++;
09930042 1952 return true;
4e7ea81d 1953 }
09930042 1954 return false;
4e7ea81d
JK
1955}
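/*
 * Worked example of the merge rules above: with map = { m_lblk = 100,
 * m_len = 3 } and matching BH_FLAGS, a dirty delayed bh at lblk 103
 * extends the extent to m_len = 4; a bh at lblk 105, or one whose
 * delay/unwritten state differs, makes the function return false so the
 * current extent is mapped and submitted first.
 */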
1956
5f1132b2
JK
1957/*
1958 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1959 *
1960 * @mpd - extent of blocks for mapping
1961 * @head - the first buffer in the page
1962 * @bh - buffer we should start processing from
1963 * @lblk - logical number of the block in the file corresponding to @bh
1964 *
 1965 * Walk through page buffers from @bh up to @head (exclusive) and either submit
1966 * the page for IO if all buffers in this page were mapped and there's no
1967 * accumulated extent of buffers to map or add buffers in the page to the
1968 * extent of buffers to map. The function returns 1 if the caller can continue
1969 * by processing the next page, 0 if it should stop adding buffers to the
1970 * extent to map because we cannot extend it anymore. It can also return value
1971 * < 0 in case of error during IO submission.
1972 */
1973static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1974 struct buffer_head *head,
1975 struct buffer_head *bh,
1976 ext4_lblk_t lblk)
4e7ea81d
JK
1977{
1978 struct inode *inode = mpd->inode;
5f1132b2 1979 int err;
93407472 1980 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
4e7ea81d
JK
1981 >> inode->i_blkbits;
1982
c93d8f88
EB
1983 if (ext4_verity_in_progress(inode))
1984 blocks = EXT_MAX_BLOCKS;
1985
4e7ea81d
JK
1986 do {
1987 BUG_ON(buffer_locked(bh));
1988
09930042 1989 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
4e7ea81d
JK
1990 /* Found extent to map? */
1991 if (mpd->map.m_len)
5f1132b2 1992 return 0;
dddbd6ac
JK
1993 /* Buffer needs mapping and handle is not started? */
1994 if (!mpd->do_map)
1995 return 0;
09930042 1996 /* Everything mapped so far and we hit EOF */
5f1132b2 1997 break;
4e7ea81d 1998 }
4e7ea81d 1999 } while (lblk++, (bh = bh->b_this_page) != head);
5f1132b2
JK
2000 /* So far everything mapped? Submit the page for IO. */
2001 if (mpd->map.m_len == 0) {
81a0d3e1 2002 err = mpage_submit_folio(mpd, head->b_folio);
5f1132b2
JK
2003 if (err < 0)
2004 return err;
33483b3b 2005 mpage_folio_done(mpd, head->b_folio);
5f1132b2 2006 }
6b8ed620
JK
2007 if (lblk >= blocks) {
2008 mpd->scanned_until_end = 1;
2009 return 0;
2010 }
2011 return 1;
4e7ea81d
JK
2012}
2013
2943fdbc 2014/*
4da2f6e3
MW
2015 * mpage_process_folio - update folio buffers corresponding to changed extent
 2016 * and may submit the fully mapped folio for IO
2017 * @mpd: description of extent to map, on return next extent to map
2018 * @folio: Contains these buffers.
2019 * @m_lblk: logical block mapping.
2020 * @m_pblk: corresponding physical mapping.
2021 * @map_bh: determines on return whether this page requires any further
2943fdbc 2022 * mapping or not.
4da2f6e3
MW
2023 *
2024 * Scan given folio buffers corresponding to changed extent and update buffer
2943fdbc
RH
2025 * state according to new extent state.
2026 * We map delalloc buffers to their physical location, clear unwritten bits.
4da2f6e3
MW
2027 * If the given folio is not fully mapped, we update @mpd to the next extent in
 2028 * the given folio that needs mapping and return @map_bh as true.
2943fdbc 2029 */
4da2f6e3 2030static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2943fdbc
RH
2031 ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2032 bool *map_bh)
2033{
2034 struct buffer_head *head, *bh;
2035 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2036 ext4_lblk_t lblk = *m_lblk;
2037 ext4_fsblk_t pblock = *m_pblk;
2038 int err = 0;
c8cc8816
RH
2039 int blkbits = mpd->inode->i_blkbits;
2040 ssize_t io_end_size = 0;
2041 struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2943fdbc 2042
4da2f6e3 2043 bh = head = folio_buffers(folio);
2943fdbc
RH
2044 do {
2045 if (lblk < mpd->map.m_lblk)
2046 continue;
2047 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2048 /*
2049 * Buffer after end of mapped extent.
4da2f6e3 2050 * Find next buffer in the folio to map.
2943fdbc
RH
2051 */
2052 mpd->map.m_len = 0;
2053 mpd->map.m_flags = 0;
c8cc8816 2054 io_end_vec->size += io_end_size;
2943fdbc 2055
2943fdbc
RH
2056 err = mpage_process_page_bufs(mpd, head, bh, lblk);
2057 if (err > 0)
2058 err = 0;
c8cc8816
RH
2059 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2060 io_end_vec = ext4_alloc_io_end_vec(io_end);
4d06bfb9
RH
2061 if (IS_ERR(io_end_vec)) {
2062 err = PTR_ERR(io_end_vec);
2063 goto out;
2064 }
d1e18b88 2065 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
c8cc8816 2066 }
2943fdbc
RH
2067 *map_bh = true;
2068 goto out;
2069 }
2070 if (buffer_delay(bh)) {
2071 clear_buffer_delay(bh);
2072 bh->b_blocknr = pblock++;
2073 }
2074 clear_buffer_unwritten(bh);
c8cc8816 2075 io_end_size += (1 << blkbits);
2943fdbc 2076 } while (lblk++, (bh = bh->b_this_page) != head);
c8cc8816
RH
2077
2078 io_end_vec->size += io_end_size;
2943fdbc
RH
2079 *map_bh = false;
2080out:
2081 *m_lblk = lblk;
2082 *m_pblk = pblock;
2083 return err;
2084}
2085
4e7ea81d
JK
2086/*
 2087 * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
2088 * submit fully mapped pages for IO
2089 *
2090 * @mpd - description of extent to map, on return next extent to map
2091 *
2092 * Scan buffers corresponding to changed extent (we expect corresponding pages
2093 * to be already locked) and update buffer state according to new extent state.
2094 * We map delalloc buffers to their physical location, clear unwritten bits,
556615dc 2095 * and mark buffers as uninit when we perform writes to unwritten extents
4e7ea81d
JK
2096 * and do extent conversion after IO is finished. If the last page is not fully
2097 * mapped, we update @map to the next extent in the last page that needs
2098 * mapping. Otherwise we submit the page for IO.
2099 */
2100static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2101{
7530d093
MWO
2102 struct folio_batch fbatch;
2103 unsigned nr, i;
4e7ea81d 2104 struct inode *inode = mpd->inode;
09cbfeaf 2105 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
4e7ea81d
JK
2106 pgoff_t start, end;
2107 ext4_lblk_t lblk;
2943fdbc 2108 ext4_fsblk_t pblock;
4e7ea81d 2109 int err;
2943fdbc 2110 bool map_bh = false;
4e7ea81d
JK
2111
2112 start = mpd->map.m_lblk >> bpp_bits;
2113 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2114 lblk = start << bpp_bits;
2115 pblock = mpd->map.m_pblk;
2116
7530d093 2117 folio_batch_init(&fbatch);
4e7ea81d 2118 while (start <= end) {
7530d093
MWO
2119 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2120 if (nr == 0)
4e7ea81d 2121 break;
7530d093 2122 for (i = 0; i < nr; i++) {
4da2f6e3 2123 struct folio *folio = fbatch.folios[i];
4e7ea81d 2124
4da2f6e3 2125 err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2943fdbc 2126 &map_bh);
4e7ea81d 2127 /*
2943fdbc
RH
 2128 * If map_bh is true, it means the folio may require further bh
 2129 * mapping, or the folio was submitted for IO.
 2130 * So we return to the caller for further extent mapping.
4e7ea81d 2131 */
39c0ae16 2132 if (err < 0 || map_bh)
2943fdbc 2133 goto out;
4e7ea81d 2134 /* Page fully mapped - let IO run! */
81a0d3e1 2135 err = mpage_submit_folio(mpd, folio);
2943fdbc
RH
2136 if (err < 0)
2137 goto out;
33483b3b 2138 mpage_folio_done(mpd, folio);
4e7ea81d 2139 }
7530d093 2140 folio_batch_release(&fbatch);
4e7ea81d
JK
2141 }
2142 /* Extent fully mapped and matches with page boundary. We are done. */
2143 mpd->map.m_len = 0;
2144 mpd->map.m_flags = 0;
2145 return 0;
2943fdbc 2146out:
7530d093 2147 folio_batch_release(&fbatch);
2943fdbc 2148 return err;
4e7ea81d
JK
2149}
2150
2151static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2152{
2153 struct inode *inode = mpd->inode;
2154 struct ext4_map_blocks *map = &mpd->map;
2155 int get_blocks_flags;
090f32ee 2156 int err, dioread_nolock;
4e7ea81d
JK
2157
2158 trace_ext4_da_write_pages_extent(inode, map);
2159 /*
2160 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
556615dc 2161 * to convert an unwritten extent to be initialized (in the case
4e7ea81d
JK
2162 * where we have written into one or more preallocated blocks). It is
2163 * possible that we're going to need more metadata blocks than
2164 * previously reserved. However we must not fail because we're in
2165 * writeback and there is nothing we can do about it so it might result
2166 * in data loss. So use reserved blocks to allocate metadata if
2167 * possible.
2168 *
754cfed6
TT
2169 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2170 * the blocks in question are delalloc blocks. This indicates
2171 * that the blocks and quotas has already been checked when
2172 * the data was copied into the page cache.
4e7ea81d
JK
2173 */
2174 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
ee0876bc
JK
2175 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2176 EXT4_GET_BLOCKS_IO_SUBMIT;
090f32ee
LC
2177 dioread_nolock = ext4_should_dioread_nolock(inode);
2178 if (dioread_nolock)
4e7ea81d 2179 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
6db07461 2180 if (map->m_flags & BIT(BH_Delay))
4e7ea81d
JK
2181 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2182
2183 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2184 if (err < 0)
2185 return err;
090f32ee 2186 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
6b523df4
JK
2187 if (!mpd->io_submit.io_end->handle &&
2188 ext4_handle_valid(handle)) {
2189 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2190 handle->h_rsv_handle = NULL;
2191 }
3613d228 2192 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
6b523df4 2193 }
4e7ea81d
JK
2194
2195 BUG_ON(map->m_len == 0);
4e7ea81d
JK
2196 return 0;
2197}
2198
2199/*
2200 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2201 * mpd->len and submit pages underlying it for IO
2202 *
2203 * @handle - handle for journal operations
2204 * @mpd - extent to map
7534e854
JK
2205 * @give_up_on_write - we set this to true iff there is a fatal error and there
2206 * is no hope of writing the data. The caller should discard
2207 * dirty pages to avoid infinite loops.
4e7ea81d
JK
2208 *
2209 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2210 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2211 * them to initialized or split the described range from larger unwritten
2212 * extent. Note that we need not map all the described range since allocation
2213 * can return less blocks or the range is covered by more unwritten extents. We
2214 * cannot map more because we are limited by reserved transaction credits. On
2215 * the other hand we always make sure that the last touched page is fully
2216 * mapped so that it can be written out (and thus forward progress is
2217 * guaranteed). After mapping we submit all mapped pages for IO.
2218 */
2219static int mpage_map_and_submit_extent(handle_t *handle,
cb530541
TT
2220 struct mpage_da_data *mpd,
2221 bool *give_up_on_write)
4e7ea81d
JK
2222{
2223 struct inode *inode = mpd->inode;
2224 struct ext4_map_blocks *map = &mpd->map;
2225 int err;
2226 loff_t disksize;
6603120e 2227 int progress = 0;
c8cc8816 2228 ext4_io_end_t *io_end = mpd->io_submit.io_end;
4d06bfb9 2229 struct ext4_io_end_vec *io_end_vec;
4e7ea81d 2230
4d06bfb9
RH
2231 io_end_vec = ext4_alloc_io_end_vec(io_end);
2232 if (IS_ERR(io_end_vec))
2233 return PTR_ERR(io_end_vec);
c8cc8816 2234 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
27d7c4ed 2235 do {
4e7ea81d
JK
2236 err = mpage_map_one_extent(handle, mpd);
2237 if (err < 0) {
2238 struct super_block *sb = inode->i_sb;
2239
0db1ff22 2240 if (ext4_forced_shutdown(EXT4_SB(sb)) ||
9b5f6c9b 2241 ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
cb530541 2242 goto invalidate_dirty_pages;
4e7ea81d 2243 /*
cb530541
TT
 2244 * Let the upper layers retry transient errors.
 2245 * In the case of ENOSPC, if ext4_count_free_clusters()
 2246 * is non-zero, a commit should free up blocks.
4e7ea81d 2247 */
cb530541 2248 if ((err == -ENOMEM) ||
6603120e
DM
2249 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2250 if (progress)
2251 goto update_disksize;
cb530541 2252 return err;
6603120e 2253 }
cb530541
TT
2254 ext4_msg(sb, KERN_CRIT,
2255 "Delayed block allocation failed for "
2256 "inode %lu at logical offset %llu with"
2257 " max blocks %u with error %d",
2258 inode->i_ino,
2259 (unsigned long long)map->m_lblk,
2260 (unsigned)map->m_len, -err);
2261 ext4_msg(sb, KERN_CRIT,
2262 "This should not happen!! Data will "
2263 "be lost\n");
2264 if (err == -ENOSPC)
2265 ext4_print_free_blocks(inode);
2266 invalidate_dirty_pages:
2267 *give_up_on_write = true;
4e7ea81d
JK
2268 return err;
2269 }
6603120e 2270 progress = 1;
4e7ea81d
JK
2271 /*
2272 * Update buffer state, submit mapped pages, and get us new
2273 * extent to map
2274 */
2275 err = mpage_map_and_submit_buffers(mpd);
2276 if (err < 0)
6603120e 2277 goto update_disksize;
27d7c4ed 2278 } while (map->m_len);
4e7ea81d 2279
6603120e 2280update_disksize:
622cad13
TT
2281 /*
2282 * Update on-disk size after IO is submitted. Races with
2283 * truncate are avoided by checking i_size under i_data_sem.
2284 */
09cbfeaf 2285 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
35df4299 2286 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
4e7ea81d 2287 int err2;
622cad13
TT
2288 loff_t i_size;
2289
2290 down_write(&EXT4_I(inode)->i_data_sem);
2291 i_size = i_size_read(inode);
2292 if (disksize > i_size)
2293 disksize = i_size;
2294 if (disksize > EXT4_I(inode)->i_disksize)
2295 EXT4_I(inode)->i_disksize = disksize;
622cad13 2296 up_write(&EXT4_I(inode)->i_data_sem);
b907f2d5 2297 err2 = ext4_mark_inode_dirty(handle, inode);
878520ac 2298 if (err2) {
54d3adbc
TT
2299 ext4_error_err(inode->i_sb, -err2,
2300 "Failed to mark inode %lu dirty",
2301 inode->i_ino);
878520ac 2302 }
4e7ea81d
JK
2303 if (!err)
2304 err = err2;
2305 }
2306 return err;
2307}
2308
fffb2739
JK
2309/*
2310 * Calculate the total number of credits to reserve for one writepages
20970ba6 2311 * iteration. This is called from ext4_writepages(). We map an extent of
70261f56 2312 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
fffb2739
JK
2313 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2314 * bpp - 1 blocks in bpp different extents.
2315 */
525f4ed8
MC
2316static int ext4_da_writepages_trans_blocks(struct inode *inode)
2317{
fffb2739 2318 int bpp = ext4_journal_blocks_per_page(inode);
525f4ed8 2319
fffb2739
JK
2320 return ext4_meta_trans_blocks(inode,
2321 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
525f4ed8 2322}
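/*
 * Worked example of the credit sizing above (assuming 4KiB pages and
 * 1KiB blocks, so bpp = 4): one writepages iteration may map up to
 * MAX_WRITEPAGES_EXTENT_LEN + 4 - 1 = 2051 blocks in at most 4
 * discontiguous extents, and the reserved credits cover that worst case.
 */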
61628a3f 2323
3f079114
JK
2324static int ext4_journal_page_buffers(handle_t *handle, struct page *page,
2325 int len)
2326{
2327 struct buffer_head *page_bufs = page_buffers(page);
2328 struct inode *inode = page->mapping->host;
2329 int ret, err;
2330
2331 ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2332 NULL, do_journal_get_write_access);
2333 err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2334 NULL, write_end_fn);
2335 if (ret == 0)
2336 ret = err;
2337 err = ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len);
2338 if (ret == 0)
2339 ret = err;
2340 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2341
3f079114
JK
2342 return ret;
2343}
2344
2345static int mpage_journal_page_buffers(handle_t *handle,
2346 struct mpage_da_data *mpd,
2347 struct page *page)
2348{
2349 struct inode *inode = mpd->inode;
2350 loff_t size = i_size_read(inode);
2351 int len;
2352
2353 ClearPageChecked(page);
3f079114
JK
2354 mpd->wbc->nr_to_write--;
2355
2356 if (page->index == size >> PAGE_SHIFT &&
2357 !ext4_verity_in_progress(inode))
2358 len = size & ~PAGE_MASK;
2359 else
2360 len = PAGE_SIZE;
2361
2362 return ext4_journal_page_buffers(handle, page, len);
2363}
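/*
 * Worked example of the tail computation above (assuming 4KiB pages):
 * with i_size = 10000, the page at index 2 (bytes 8192..12287) is the
 * EOF page, so len = 10000 & ~PAGE_MASK = 1808; all earlier pages
 * journal the full PAGE_SIZE.
 */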
2364
8e48dcfb 2365/*
4e7ea81d 2366 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
de0039f6 2367 * needing mapping, submit mapped pages
4e7ea81d
JK
2368 *
2369 * @mpd - where to look for pages
2370 *
2371 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
de0039f6
JK
2372 * IO immediately. If we cannot map blocks, we submit just already mapped
2373 * buffers in the page for IO and keep page dirty. When we can map blocks and
2374 * we find a page which isn't mapped we start accumulating extent of buffers
2375 * underlying these pages that needs mapping (formed by either delayed or
2376 * unwritten buffers). We also lock the pages containing these buffers. The
2377 * extent found is returned in @mpd structure (starting at mpd->lblk with
2378 * length mpd->len blocks).
4e7ea81d
JK
2379 *
2380 * Note that this function can attach bios to one io_end structure which are
 2381 * neither logically nor physically contiguous. Although it may seem like an
2382 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2383 * case as we need to track IO to all buffers underlying a page in one io_end.
8e48dcfb 2384 */
4e7ea81d 2385static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
8e48dcfb 2386{
4e7ea81d 2387 struct address_space *mapping = mpd->inode->i_mapping;
50ead253
VMO
2388 struct folio_batch fbatch;
2389 unsigned int nr_folios;
4e7ea81d
JK
2390 pgoff_t index = mpd->first_page;
2391 pgoff_t end = mpd->last_page;
10bbd235 2392 xa_mark_t tag;
4e7ea81d
JK
2393 int i, err = 0;
2394 int blkbits = mpd->inode->i_blkbits;
2395 ext4_lblk_t lblk;
2396 struct buffer_head *head;
3f079114
JK
2397 handle_t *handle = NULL;
2398 int bpp = ext4_journal_blocks_per_page(mpd->inode);
8e48dcfb 2399
4e7ea81d 2400 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
5b41d924
ES
2401 tag = PAGECACHE_TAG_TOWRITE;
2402 else
2403 tag = PAGECACHE_TAG_DIRTY;
3f079114 2404
e6c28a26
JK
2405 mpd->map.m_len = 0;
2406 mpd->next_page = index;
d0ab8368 2407 if (ext4_should_journal_data(mpd->inode)) {
3f079114
JK
2408 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2409 bpp);
2410 if (IS_ERR(handle))
2411 return PTR_ERR(handle);
2412 }
50ead253 2413 folio_batch_init(&fbatch);
4f01b02c 2414 while (index <= end) {
50ead253
VMO
2415 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2416 tag, &fbatch);
2417 if (nr_folios == 0)
6b8ed620 2418 break;
8e48dcfb 2419
50ead253
VMO
2420 for (i = 0; i < nr_folios; i++) {
2421 struct folio *folio = fbatch.folios[i];
8e48dcfb 2422
aeac589a
ML
2423 /*
2424 * Accumulated enough dirty pages? This doesn't apply
2425 * to WB_SYNC_ALL mode. For integrity sync we have to
2426 * keep going because someone may be concurrently
2427 * dirtying pages, and we might have synced a lot of
2428 * newly appeared dirty pages, but have not synced all
2429 * of the old dirty pages.
2430 */
c8e8e16d
JK
2431 if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2432 mpd->wbc->nr_to_write <=
2433 mpd->map.m_len >> (PAGE_SHIFT - blkbits))
aeac589a
ML
2434 goto out;
2435
4e7ea81d 2436 /* If we can't merge this page, we are done. */
50ead253 2437 if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
4e7ea81d 2438 goto out;
78aaced3 2439
3f079114
JK
2440 if (handle) {
2441 err = ext4_journal_ensure_credits(handle, bpp,
2442 0);
2443 if (err < 0)
2444 goto out;
2445 }
2446
50ead253 2447 folio_lock(folio);
8e48dcfb 2448 /*
4e7ea81d
JK
2449 * If the page is no longer dirty, or its mapping no
2450 * longer corresponds to inode we are writing (which
2451 * means it has been truncated or invalidated), or the
2452 * page is already under writeback and we are not doing
2453 * a data integrity writeback, skip the page
8e48dcfb 2454 */
50ead253
VMO
2455 if (!folio_test_dirty(folio) ||
2456 (folio_test_writeback(folio) &&
4e7ea81d 2457 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
50ead253
VMO
2458 unlikely(folio->mapping != mapping)) {
2459 folio_unlock(folio);
8e48dcfb
TT
2460 continue;
2461 }
2462
50ead253
VMO
2463 folio_wait_writeback(folio);
2464 BUG_ON(folio_test_writeback(folio));
8e48dcfb 2465
cc509574
TT
2466 /*
2467 * Should never happen but for buggy code in
2468 * other subsystems that call
2469 * set_page_dirty() without properly warning
2470 * the file system first. See [1] for more
2471 * information.
2472 *
2473 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2474 */
50ead253
VMO
2475 if (!folio_buffers(folio)) {
2476 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2477 folio_clear_dirty(folio);
2478 folio_unlock(folio);
cc509574
TT
2479 continue;
2480 }
2481
4e7ea81d 2482 if (mpd->map.m_len == 0)
50ead253
VMO
2483 mpd->first_page = folio->index;
2484 mpd->next_page = folio->index + folio_nr_pages(folio);
de0039f6 2485 /*
3f079114
JK
2486 * Writeout when we cannot modify metadata is simple.
2487 * Just submit the page. For data=journal mode we
2488 * first handle writeout of the page for checkpoint and
2489 * only after that handle delayed page dirtying. This
ab382539
JK
2490 * makes sure current data is checkpointed to the final
2491 * location before possibly journalling it again which
2492 * is desirable when the page is frequently dirtied
2493 * through a pin.
de0039f6
JK
2494 */
2495 if (!mpd->can_map) {
5e1bdea6
JK
2496 err = mpage_submit_folio(mpd, folio);
2497 if (err < 0)
2498 goto out;
3f079114 2499 /* Pending dirtying of journalled data? */
81a0d3e1 2500 if (folio_test_checked(folio)) {
3f079114
JK
2501 err = mpage_journal_page_buffers(handle,
2502 mpd, &folio->page);
2503 if (err < 0)
2504 goto out;
1f1a55f0 2505 mpd->journalled_more_data = 1;
3f079114 2506 }
33483b3b 2507 mpage_folio_done(mpd, folio);
de0039f6
JK
2508 } else {
2509 /* Add all dirty buffers to mpd */
50ead253 2510 lblk = ((ext4_lblk_t)folio->index) <<
de0039f6 2511 (PAGE_SHIFT - blkbits);
50ead253 2512 head = folio_buffers(folio);
de0039f6 2513 err = mpage_process_page_bufs(mpd, head, head,
50ead253 2514 lblk);
de0039f6
JK
2515 if (err <= 0)
2516 goto out;
2517 err = 0;
2518 }
8e48dcfb 2519 }
50ead253 2520 folio_batch_release(&fbatch);
8e48dcfb
TT
2521 cond_resched();
2522 }
6b8ed620 2523 mpd->scanned_until_end = 1;
3f079114
JK
2524 if (handle)
2525 ext4_journal_stop(handle);
4f01b02c 2526 return 0;
8eb9e5ce 2527out:
50ead253 2528 folio_batch_release(&fbatch);
3f079114
JK
2529 if (handle)
2530 ext4_journal_stop(handle);
4e7ea81d 2531 return err;
8e48dcfb
TT
2532}
2533
15648d59 2534static int ext4_do_writepages(struct mpage_da_data *mpd)
64769240 2535{
15648d59 2536 struct writeback_control *wbc = mpd->wbc;
4e7ea81d
JK
2537 pgoff_t writeback_index = 0;
2538 long nr_to_write = wbc->nr_to_write;
22208ded 2539 int range_whole = 0;
4e7ea81d 2540 int cycled = 1;
61628a3f 2541 handle_t *handle = NULL;
15648d59
JK
2542 struct inode *inode = mpd->inode;
2543 struct address_space *mapping = inode->i_mapping;
6b523df4 2544 int needed_blocks, rsv_blocks = 0, ret = 0;
5e745b04 2545 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
1bce63d1 2546 struct blk_plug plug;
cb530541 2547 bool give_up_on_write = false;
61628a3f 2548
20970ba6 2549 trace_ext4_writepages(inode, wbc);
ba80b101 2550
61628a3f
MC
2551 /*
2552 * No pages to write? This is mainly a kludge to avoid starting
 2553 * a transaction for special inodes like the journal inode on last iput()
 2554 * because that could violate lock ordering on umount.
2555 */
a1d6cc56 2556 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
bbf023c7 2557 goto out_writepages;
2a21e37e
TT
2558
2559 /*
2560 * If the filesystem has aborted, it is read-only, so return
2561 * right away instead of dumping stack traces later on that
2562 * will obscure the real source of the problem. We test
1751e8a6 2563 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
2a21e37e 2564 * the latter could be true if the filesystem is mounted
20970ba6 2565 * read-only, and in that case, ext4_writepages should
2a21e37e
TT
2566 * *never* be called, so if that ever happens, we would want
2567 * the stack trace.
2568 */
0db1ff22 2569 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
9b5f6c9b 2570 ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))) {
bbf023c7
ML
2571 ret = -EROFS;
2572 goto out_writepages;
2573 }
2a21e37e 2574
4e7ea81d
JK
2575 /*
2576 * If we have inline data and arrive here, it means that
2577 * we will soon create the block for the 1st page, so
2578 * we'd better clear the inline data here.
2579 */
2580 if (ext4_has_inline_data(inode)) {
2581 /* Just inode will be modified... */
2582 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2583 if (IS_ERR(handle)) {
2584 ret = PTR_ERR(handle);
2585 goto out_writepages;
2586 }
2587 BUG_ON(ext4_test_inode_state(inode,
2588 EXT4_STATE_MAY_INLINE_DATA));
2589 ext4_destroy_inline_data(handle, inode);
2590 ext4_journal_stop(handle);
2591 }
2592
3f079114
JK
2593 /*
2594 * data=journal mode does not do delalloc so we just need to writeout /
1f1a55f0
JK
2595 * journal already mapped buffers. On the other hand we need to commit
2596 * transaction to make data stable. We expect all the data to be
2597 * already in the journal (the only exception are DMA pinned pages
2598 * dirtied behind our back) so we commit transaction here and run the
2599 * writeback loop to checkpoint them. The checkpointing is not actually
2600 * necessary to make data persistent *but* quite a few places (extent
2601 * shifting operations, fsverity, ...) depend on being able to drop
2602 * pagecache pages after calling filemap_write_and_wait() and for that
2603 * checkpointing needs to happen.
3f079114 2604 */
1f1a55f0 2605 if (ext4_should_journal_data(inode)) {
3f079114 2606 mpd->can_map = 0;
1f1a55f0
JK
2607 if (wbc->sync_mode == WB_SYNC_ALL)
2608 ext4_fc_commit(sbi->s_journal,
2609 EXT4_I(inode)->i_datasync_tid);
2610 }
2611 mpd->journalled_more_data = 0;
3f079114 2612
4e343231 2613 if (ext4_should_dioread_nolock(inode)) {
2614 /*
2615 * We may need to convert up to one extent per block in
2616 * the page and we may dirty the inode.
2617 */
2618 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2619 PAGE_SIZE >> inode->i_blkbits);
2620 }
2621
22208ded
AK
2622 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2623 range_whole = 1;
61628a3f 2624
2acf2c26 2625 if (wbc->range_cyclic) {
4e7ea81d
JK
2626 writeback_index = mapping->writeback_index;
2627 if (writeback_index)
2acf2c26 2628 cycled = 0;
15648d59
JK
2629 mpd->first_page = writeback_index;
2630 mpd->last_page = -1;
5b41d924 2631 } else {
15648d59
JK
2632 mpd->first_page = wbc->range_start >> PAGE_SHIFT;
2633 mpd->last_page = wbc->range_end >> PAGE_SHIFT;
5b41d924 2634 }
a1d6cc56 2635
15648d59 2636 ext4_io_submit_init(&mpd->io_submit, wbc);
2acf2c26 2637retry:
6e6938b6 2638 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
15648d59
JK
2639 tag_pages_for_writeback(mapping, mpd->first_page,
2640 mpd->last_page);
1bce63d1 2641 blk_start_plug(&plug);
dddbd6ac
JK
2642
2643 /*
2644 * First writeback pages that don't need mapping - we can avoid
2645 * starting a transaction unnecessarily and also avoid being blocked
 2646 * in the block layer on device congestion while having a transaction
2647 * started.
2648 */
15648d59
JK
2649 mpd->do_map = 0;
2650 mpd->scanned_until_end = 0;
2651 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2652 if (!mpd->io_submit.io_end) {
dddbd6ac
JK
2653 ret = -ENOMEM;
2654 goto unplug;
2655 }
15648d59 2656 ret = mpage_prepare_extent_to_map(mpd);
a297b2fc 2657 /* Unlock pages we didn't use */
15648d59 2658 mpage_release_unused_pages(mpd, false);
dddbd6ac 2659 /* Submit prepared bio */
15648d59
JK
2660 ext4_io_submit(&mpd->io_submit);
2661 ext4_put_io_end_defer(mpd->io_submit.io_end);
2662 mpd->io_submit.io_end = NULL;
dddbd6ac
JK
2663 if (ret < 0)
2664 goto unplug;
2665
15648d59 2666 while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
4e7ea81d 2667 /* For each extent of pages we use new io_end */
15648d59
JK
2668 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2669 if (!mpd->io_submit.io_end) {
4e7ea81d
JK
2670 ret = -ENOMEM;
2671 break;
2672 }
a1d6cc56 2673
de0039f6 2674 WARN_ON_ONCE(!mpd->can_map);
a1d6cc56 2675 /*
4e7ea81d
JK
2676 * We have two constraints: We find one extent to map and we
2677 * must always write out whole page (makes a difference when
2678 * blocksize < pagesize) so that we don't block on IO when we
2679 * try to write out the rest of the page. Journalled mode is
2680 * not supported by delalloc.
a1d6cc56
AK
2681 */
2682 BUG_ON(ext4_should_journal_data(inode));
525f4ed8 2683 needed_blocks = ext4_da_writepages_trans_blocks(inode);
a1d6cc56 2684
4e7ea81d 2685 /* start a new transaction */
6b523df4
JK
2686 handle = ext4_journal_start_with_reserve(inode,
2687 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
61628a3f
MC
2688 if (IS_ERR(handle)) {
2689 ret = PTR_ERR(handle);
1693918e 2690 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
fbe845dd 2691 "%ld pages, ino %lu; err %d", __func__,
a1d6cc56 2692 wbc->nr_to_write, inode->i_ino, ret);
4e7ea81d 2693 /* Release allocated io_end */
15648d59
JK
2694 ext4_put_io_end(mpd->io_submit.io_end);
2695 mpd->io_submit.io_end = NULL;
4e7ea81d 2696 break;
61628a3f 2697 }
15648d59 2698 mpd->do_map = 1;
f63e6005 2699
15648d59
JK
2700 trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2701 ret = mpage_prepare_extent_to_map(mpd);
2702 if (!ret && mpd->map.m_len)
2703 ret = mpage_map_and_submit_extent(handle, mpd,
cb530541 2704 &give_up_on_write);
646caa9c
JK
2705 /*
2706 * Caution: If the handle is synchronous,
2707 * ext4_journal_stop() can wait for transaction commit
2708 * to finish which may depend on writeback of pages to
2709 * complete or on page lock to be released. In that
b483bb77 2710 * case, we have to wait until after we have
646caa9c
JK
2711 * submitted all the IO, released page locks we hold,
2712 * and dropped io_end reference (for extent conversion
2713 * to be able to complete) before stopping the handle.
2714 */
2715 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2716 ext4_journal_stop(handle);
2717 handle = NULL;
15648d59 2718 mpd->do_map = 0;
646caa9c 2719 }
4e7ea81d 2720 /* Unlock pages we didn't use */
15648d59 2721 mpage_release_unused_pages(mpd, give_up_on_write);
a297b2fc 2722 /* Submit prepared bio */
15648d59 2723 ext4_io_submit(&mpd->io_submit);
a297b2fc 2724
646caa9c
JK
2725 /*
2726 * Drop our io_end reference we got from init. We have
2727 * to be careful and use deferred io_end finishing if
2728 * we are still holding the transaction as we can
2729 * release the last reference to io_end which may end
2730 * up doing unwritten extent conversion.
2731 */
2732 if (handle) {
15648d59 2733 ext4_put_io_end_defer(mpd->io_submit.io_end);
646caa9c
JK
2734 ext4_journal_stop(handle);
2735 } else
15648d59
JK
2736 ext4_put_io_end(mpd->io_submit.io_end);
2737 mpd->io_submit.io_end = NULL;
4e7ea81d
JK
2738
2739 if (ret == -ENOSPC && sbi->s_journal) {
2740 /*
2741 * Commit the transaction which would
22208ded
AK
2742 * free blocks released in the transaction
2743 * and try again
2744 */
df22291f 2745 jbd2_journal_force_commit_nested(sbi->s_journal);
22208ded 2746 ret = 0;
4e7ea81d
JK
2747 continue;
2748 }
2749 /* Fatal error - ENOMEM, EIO... */
2750 if (ret)
61628a3f 2751 break;
a1d6cc56 2752 }
dddbd6ac 2753unplug:
1bce63d1 2754 blk_finish_plug(&plug);
9c12a831 2755 if (!ret && !cycled && wbc->nr_to_write > 0) {
2acf2c26 2756 cycled = 1;
15648d59
JK
2757 mpd->last_page = writeback_index - 1;
2758 mpd->first_page = 0;
2acf2c26
AK
2759 goto retry;
2760 }
22208ded
AK
2761
2762 /* Update index */
22208ded
AK
2763 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2764 /*
4e7ea81d 2765 * Set the writeback_index so that range_cyclic
22208ded
AK
2766 * mode will write it back later
2767 */
15648d59 2768 mapping->writeback_index = mpd->first_page;
a1d6cc56 2769
61628a3f 2770out_writepages:
20970ba6
TT
2771 trace_ext4_writepages_result(inode, wbc, ret,
2772 nr_to_write - wbc->nr_to_write);
61628a3f 2773 return ret;
64769240
AT
2774}
2775
15648d59
JK
2776static int ext4_writepages(struct address_space *mapping,
2777 struct writeback_control *wbc)
2778{
29bc9cea 2779 struct super_block *sb = mapping->host->i_sb;
15648d59
JK
2780 struct mpage_da_data mpd = {
2781 .inode = mapping->host,
2782 .wbc = wbc,
2783 .can_map = 1,
2784 };
29bc9cea 2785 int ret;
00d873c1 2786 int alloc_ctx;
29bc9cea
JK
2787
2788 if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
2789 return -EIO;
15648d59 2790
00d873c1 2791 alloc_ctx = ext4_writepages_down_read(sb);
29bc9cea 2792 ret = ext4_do_writepages(&mpd);
1f1a55f0
JK
2793 /*
2794 * For data=journal writeback we could have come across pages marked
2795 * for delayed dirtying (PageChecked) which were just added to the
2796 * running transaction. Try once more to get them to stable storage.
2797 */
2798 if (!ret && mpd.journalled_more_data)
2799 ret = ext4_do_writepages(&mpd);
00d873c1 2800 ext4_writepages_up_read(sb, alloc_ctx);
29bc9cea
JK
2801
2802 return ret;
15648d59
JK
2803}
2804
59205c8d
JK
2805int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
2806{
2807 struct writeback_control wbc = {
2808 .sync_mode = WB_SYNC_ALL,
2809 .nr_to_write = LONG_MAX,
2810 .range_start = jinode->i_dirty_start,
2811 .range_end = jinode->i_dirty_end,
2812 };
2813 struct mpage_da_data mpd = {
2814 .inode = jinode->i_vfs_inode,
2815 .wbc = &wbc,
2816 .can_map = 0,
2817 };
2818 return ext4_do_writepages(&mpd);
2819}
2820
5f0663bb
DW
2821static int ext4_dax_writepages(struct address_space *mapping,
2822 struct writeback_control *wbc)
2823{
2824 int ret;
2825 long nr_to_write = wbc->nr_to_write;
2826 struct inode *inode = mapping->host;
2827 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
00d873c1 2828 int alloc_ctx;
5f0663bb
DW
2829
2830 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2831 return -EIO;
2832
00d873c1 2833 alloc_ctx = ext4_writepages_down_read(inode->i_sb);
5f0663bb
DW
2834 trace_ext4_writepages(inode, wbc);
2835
3f666c56 2836 ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
5f0663bb
DW
2837 trace_ext4_writepages_result(inode, wbc, ret,
2838 nr_to_write - wbc->nr_to_write);
00d873c1 2839 ext4_writepages_up_read(inode->i_sb, alloc_ctx);
5f0663bb
DW
2840 return ret;
2841}
2842
79f0be8d
AK
2843static int ext4_nonda_switch(struct super_block *sb)
2844{
5c1ff336 2845 s64 free_clusters, dirty_clusters;
79f0be8d
AK
2846 struct ext4_sb_info *sbi = EXT4_SB(sb);
2847
2848 /*
 2849 * switch to non delalloc mode if we are running low
 2850 * on free blocks. The free block accounting via percpu
179f7ebf 2851 * counters can get slightly wrong with percpu_counter_batch getting
79f0be8d
AK
 2852 * accumulated on each CPU without updating the global counters.
 2853 * Delalloc needs accurate free block accounting. So switch
 2854 * to non delalloc when we are near the error range.
2855 */
5c1ff336
EW
2856 free_clusters =
2857 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2858 dirty_clusters =
2859 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
00d4e736
TT
2860 /*
2861 * Start pushing delalloc when 1/2 of free blocks are dirty.
2862 */
5c1ff336 2863 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
10ee27a0 2864 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
00d4e736 2865
5c1ff336
EW
2866 if (2 * free_clusters < 3 * dirty_clusters ||
2867 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
79f0be8d 2868 /*
c8afb446
ES
 2869 * free block count is less than 150% of dirty blocks
 2870 * or free blocks are less than the watermark
79f0be8d
AK
2871 */
2872 return 1;
2873 }
2874 return 0;
2875}
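/*
 * Worked example of the thresholds above: with free_clusters = 100 and
 * dirty_clusters = 80, writeback is already being pushed (100 < 2 * 80),
 * and since 2 * 100 < 3 * 80 the caller falls back to non-delalloc
 * writes until the imbalance clears.
 */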
2876
64769240 2877static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
9d6b0cd7 2878 loff_t pos, unsigned len,
de9a55b8 2879 struct page **pagep, void **fsdata)
64769240 2880{
72b8ab9d 2881 int ret, retries = 0;
0b5a2543 2882 struct folio *folio;
64769240 2883 pgoff_t index;
64769240 2884 struct inode *inode = mapping->host;
64769240 2885
0db1ff22
TT
2886 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2887 return -EIO;
2888
09cbfeaf 2889 index = pos >> PAGE_SHIFT;
79f0be8d 2890
6493792d 2891 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
79f0be8d
AK
2892 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2893 return ext4_write_begin(file, mapping, pos,
9d6b0cd7 2894 len, pagep, fsdata);
79f0be8d
AK
2895 }
2896 *fsdata = (void *)0;
9d6b0cd7 2897 trace_ext4_da_write_begin(inode, pos, len);
9c3569b5
TM
2898
2899 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
36d116e9 2900 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
9c3569b5
TM
2901 pagep, fsdata);
2902 if (ret < 0)
47564bfb
TT
2903 return ret;
2904 if (ret == 1)
2905 return 0;
9c3569b5
TM
2906 }
2907
cc883236 2908retry:
0b5a2543
MW
2909 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2910 mapping_gfp_mask(mapping));
7fa8a8ee
LT
2911 if (IS_ERR(folio))
2912 return PTR_ERR(folio);
47564bfb 2913
0b5a2543
MW
2914 /* In case writeback began while the folio was unlocked */
2915 folio_wait_stable(folio);
64769240 2916
643fa961 2917#ifdef CONFIG_FS_ENCRYPTION
86b38c27 2918 ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2058f83a 2919#else
0b5a2543 2920 ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
2058f83a 2921#endif
64769240 2922 if (ret < 0) {
0b5a2543
MW
2923 folio_unlock(folio);
2924 folio_put(folio);
ae4d5372
AK
2925 /*
2926 * block_write_begin may have instantiated a few blocks
2927 * outside i_size. Trim these off again. Don't need
cc883236 2928 * i_size_read because we hold inode lock.
ae4d5372
AK
2929 */
2930 if (pos + len > inode->i_size)
b9a4207d 2931 ext4_truncate_failed_write(inode);
47564bfb
TT
2932
2933 if (ret == -ENOSPC &&
2934 ext4_should_retry_alloc(inode->i_sb, &retries))
cc883236 2935 goto retry;
47564bfb 2936 return ret;
64769240
AT
2937 }
2938
0b5a2543 2939 *pagep = &folio->page;
64769240
AT
2940 return ret;
2941}
2942
632eaeab
MC
2943/*
2944 * Check if we should update i_disksize
2945 * when write to the end of file but not require block allocation
2946 */
2947static int ext4_da_should_update_i_disksize(struct page *page,
de9a55b8 2948 unsigned long offset)
632eaeab
MC
2949{
2950 struct buffer_head *bh;
2951 struct inode *inode = page->mapping->host;
2952 unsigned int idx;
2953 int i;
2954
2955 bh = page_buffers(page);
2956 idx = offset >> inode->i_blkbits;
2957
af5bc92d 2958 for (i = 0; i < idx; i++)
632eaeab
MC
2959 bh = bh->b_this_page;
2960
29fa89d0 2961 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
632eaeab
MC
2962 return 0;
2963 return 1;
2964}
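/*
 * Worked example (assuming 4KiB pages and 1KiB blocks): for a write
 * ending at offset 2500 within the page, idx = 2500 >> 10 = 2, so the
 * third buffer_head decides; i_disksize is only updated when that
 * buffer is mapped and neither delayed nor unwritten.
 */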
2965
64769240 2966static int ext4_da_write_end(struct file *file,
de9a55b8
TT
2967 struct address_space *mapping,
2968 loff_t pos, unsigned len, unsigned copied,
2969 struct page *page, void *fsdata)
64769240
AT
2970{
2971 struct inode *inode = mapping->host;
64769240 2972 loff_t new_i_size;
632eaeab 2973 unsigned long start, end;
79f0be8d
AK
2974 int write_mode = (int)(unsigned long)fsdata;
2975
74d553aa
TT
2976 if (write_mode == FALL_BACK_TO_NONDELALLOC)
2977 return ext4_write_end(file, mapping, pos,
2978 len, copied, page, fsdata);
632eaeab 2979
9bffad1e 2980 trace_ext4_da_write_end(inode, pos, len, copied);
6984aef5
ZY
2981
2982 if (write_mode != CONVERT_INLINE_DATA &&
2983 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
2984 ext4_has_inline_data(inode))
2985 return ext4_write_inline_data_end(inode, pos, len, copied, page);
2986
1dedde69
ZC
2987 if (unlikely(copied < len) && !PageUptodate(page))
2988 copied = 0;
2989
09cbfeaf 2990 start = pos & (PAGE_SIZE - 1);
af5bc92d 2991 end = start + copied - 1;
64769240
AT
2992
2993 /*
4df031ff
ZY
2994 * Since we are holding inode lock, we are sure i_disksize <=
2995 * i_size. We also know that if i_disksize < i_size, there are
 2996 * delalloc writes pending in the range up to i_size. If the end of
 2997 * the current write is <= i_size, there's no need to touch
 2998 * i_disksize since writeback will push i_disksize up to i_size
2999 * eventually. If the end of the current write is > i_size and
3000 * inside an allocated block (ext4_da_should_update_i_disksize()
3f079114
JK
3001 * check), we need to update i_disksize here as certain
3002 * ext4_writepages() paths not allocating blocks update i_disksize.
4df031ff
ZY
3003 *
3004 * Note that we defer inode dirtying to generic_write_end() /
3005 * ext4_da_write_inline_data_end().
64769240 3006 */
64769240 3007 new_i_size = pos + copied;
6984aef5
ZY
3008 if (copied && new_i_size > inode->i_size &&
3009 ext4_da_should_update_i_disksize(page, end))
3010 ext4_update_i_disksize(inode, new_i_size);
9c3569b5 3011
cc883236 3012 return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
64769240
AT
3013}
3014
ccd2506b
TT
3015/*
3016 * Force all delayed allocation blocks to be allocated for a given inode.
3017 */
3018int ext4_alloc_da_blocks(struct inode *inode)
3019{
fb40ba0d
TT
3020 trace_ext4_alloc_da_blocks(inode);
3021
71d4f7d0 3022 if (!EXT4_I(inode)->i_reserved_data_blocks)
ccd2506b
TT
3023 return 0;
3024
3025 /*
3026 * We do something simple for now. The filemap_flush() will
3027 * also start triggering a write of the data blocks, which is
3028 * not strictly speaking necessary (and for users of
3029 * laptop_mode, not even desirable). However, to do otherwise
3030 * would require replicating code paths in:
de9a55b8 3031 *
20970ba6 3032 * ext4_writepages() ->
ccd2506b
TT
3033 * write_cache_pages() ---> (via passed in callback function)
3034 * __mpage_da_writepage() -->
3035 * mpage_add_bh_to_extent()
3036 * mpage_da_map_blocks()
3037 *
3038 * The problem is that write_cache_pages(), located in
3039 * mm/page-writeback.c, marks pages clean in preparation for
3040 * doing I/O, which is not desirable if we're not planning on
3041 * doing I/O at all.
3042 *
3043 * We could call write_cache_pages(), and then redirty all of
380cf090 3044 * the pages by calling redirty_page_for_writepage() but that
ccd2506b
TT
3045 * would be ugly in the extreme. So instead we would need to
3046 * replicate parts of the code in the above functions,
25985edc 3047 * simplifying them because we wouldn't actually intend to
ccd2506b
TT
3048 * write out the pages, but rather only collect contiguous
3049 * logical block extents, call the multi-block allocator, and
3050 * then update the buffer heads with the block allocations.
de9a55b8 3051 *
ccd2506b
TT
3052 * For now, though, we'll cheat by calling filemap_flush(),
3053 * which will map the blocks, and start the I/O, but not
3054 * actually wait for the I/O to complete.
3055 */
3056 return filemap_flush(inode->i_mapping);
3057}
64769240 3058
ac27a0ec
DK
3059/*
3060 * bmap() is special. It gets used by applications such as lilo and by
3061 * the swapper to find the on-disk block of a specific piece of data.
3062 *
3063 * Naturally, this is dangerous if the block concerned is still in the
617ba13b 3064 * journal. If somebody makes a swapfile on an ext4 data-journaling
ac27a0ec
DK
3065 * filesystem and enables swap, then they may get a nasty shock when the
3066 * data being swapped to that swapfile suddenly gets overwritten by
3067 * the original zeros written out previously to the journal and
3068 * awaiting writeback in the kernel's buffer cache.
3069 *
3070 * So, if we see any bmap calls here on a modified, data-journaled file,
3071 * take extra steps to flush any blocks which might be in the cache.
3072 */
617ba13b 3073static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
ac27a0ec
DK
3074{
3075 struct inode *inode = mapping->host;
51ae846c 3076 sector_t ret = 0;
ac27a0ec 3077
51ae846c 3078 inode_lock_shared(inode);
46c7f254
TM
3079 /*
3080 * We can get here for an inline file via the FIBMAP ioctl
3081 */
3082 if (ext4_has_inline_data(inode))
51ae846c 3083 goto out;
46c7f254 3084
64769240 3085 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
951cafa6
JK
3086 (test_opt(inode->i_sb, DELALLOC) ||
3087 ext4_should_journal_data(inode))) {
64769240 3088 /*
951cafa6
JK
3089 * With delalloc or journalled data we want to sync the file so
3090 * that we can make sure blocks are allocated for the file and the
3091 * data is in place for the user to see it
64769240
AT
3092 */
3093 filemap_write_and_wait(mapping);
3094 }
3095
51ae846c
YB
3096 ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3097
3098out:
3099 inode_unlock_shared(inode);
3100 return ret;
ac27a0ec
DK
3101}
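/*
 * Illustrative userspace sketch (not part of this file): programs such
 * as lilo reach ext4_bmap() through the FIBMAP ioctl.  It requires
 * CAP_SYS_RAWIO; the block number is exchanged in filesystem-block
 * units and 0 means a hole.
 */
#if 0	/* userspace example */
#include <sys/ioctl.h>
#include <linux/fs.h>

static int logical_to_physical(int fd, int logical_block)
{
	int blk = logical_block;

	if (ioctl(fd, FIBMAP, &blk) < 0)
		return -1;	/* not permitted or not supported */
	return blk;
}
#endif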
3102
fe5ddf6b 3103static int ext4_read_folio(struct file *file, struct folio *folio)
ac27a0ec 3104{
46c7f254 3105 int ret = -EAGAIN;
c0be8e6f 3106 struct inode *inode = folio->mapping->host;
46c7f254 3107
c0be8e6f 3108 trace_ext4_readpage(&folio->page);
46c7f254
TM
3109
3110 if (ext4_has_inline_data(inode))
3edde93e 3111 ret = ext4_readpage_inline(inode, folio);
46c7f254
TM
3112
3113 if (ret == -EAGAIN)
c0be8e6f 3114 return ext4_mpage_readpages(inode, NULL, folio);
46c7f254
TM
3115
3116 return ret;
ac27a0ec
DK
3117}
3118
6311f91f 3119static void ext4_readahead(struct readahead_control *rac)
ac27a0ec 3120{
6311f91f 3121 struct inode *inode = rac->mapping->host;
46c7f254 3122
6311f91f 3123 /* If the file has inline data, no need to do readahead. */
46c7f254 3124 if (ext4_has_inline_data(inode))
6311f91f 3125 return;
46c7f254 3126
a07f624b 3127 ext4_mpage_readpages(inode, rac, NULL);
ac27a0ec
DK
3128}
3129
7ba13abb
MWO
3130static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3131 size_t length)
ac27a0ec 3132{
ccd16945 3133 trace_ext4_invalidate_folio(folio, offset, length);
0562e0ba 3134
4520fb3c 3135 /* No journalling happens on data buffers when this function is used */
7ba13abb 3136 WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
4520fb3c 3137
7ba13abb 3138 block_invalidate_folio(folio, offset, length);
4520fb3c
JK
3139}
3140
ccd16945
MWO
3141static int __ext4_journalled_invalidate_folio(struct folio *folio,
3142 size_t offset, size_t length)
4520fb3c 3143{
ccd16945 3144 journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
4520fb3c 3145
ccd16945 3146 trace_ext4_journalled_invalidate_folio(folio, offset, length);
4520fb3c 3147
ac27a0ec
DK
3148 /*
3149 * If it's a full truncate we just forget about the pending dirtying
3150 */
ccd16945
MWO
3151 if (offset == 0 && length == folio_size(folio))
3152 folio_clear_checked(folio);
ac27a0ec 3153
ccd16945 3154 return jbd2_journal_invalidate_folio(journal, folio, offset, length);
53e87268
JK
3155}
3156
3157/* Wrapper for aops... */
ccd16945
MWO
3158static void ext4_journalled_invalidate_folio(struct folio *folio,
3159 size_t offset,
3160 size_t length)
53e87268 3161{
ccd16945 3162 WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
ac27a0ec
DK
3163}
3164
3c402f15 3165static bool ext4_release_folio(struct folio *folio, gfp_t wait)
ac27a0ec 3166{
3c402f15 3167 journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
ac27a0ec 3168
3c402f15 3169 trace_ext4_releasepage(&folio->page);
0562e0ba 3170
e1c36595 3171 /* Page has dirty journalled data -> cannot release */
3c402f15
MWO
3172 if (folio_test_checked(folio))
3173 return false;
0390131b 3174 if (journal)
c56a6eb0 3175 return jbd2_journal_try_to_free_buffers(journal, folio);
0390131b 3176 else
68189fef 3177 return try_to_free_buffers(folio);
ac27a0ec
DK
3178}
3179
b8a6176c
JK
3180static bool ext4_inode_datasync_dirty(struct inode *inode)
3181{
3182 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3183
aa75f4d3
HS
3184 if (journal) {
3185 if (jbd2_transaction_committed(journal,
d0520df7
AR
3186 EXT4_I(inode)->i_datasync_tid))
3187 return false;
3188 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
1ceecb53 3189 return !list_empty(&EXT4_I(inode)->i_fc_list);
d0520df7 3190 return true;
aa75f4d3
HS
3191 }
3192
b8a6176c
JK
3193 /* Any metadata buffers to write? */
3194 if (!list_empty(&inode->i_mapping->private_list))
3195 return true;
3196 return inode->i_state & I_DIRTY_DATASYNC;
3197}
3198
c8fdfe29
MB
3199static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3200 struct ext4_map_blocks *map, loff_t offset,
de205114 3201 loff_t length, unsigned int flags)
364443cb 3202{
c8fdfe29 3203 u8 blkbits = inode->i_blkbits;
364443cb 3204
c8fdfe29
MB
3205 /*
3206 * Writes that span EOF might trigger an I/O size update on completion,
3207 * so consider them to be dirty for the purpose of O_DSYNC, even if
3208 * there are no other metadata changes being made or pending.
3209 */
364443cb 3210 iomap->flags = 0;
c8fdfe29
MB
3211 if (ext4_inode_datasync_dirty(inode) ||
3212 offset + length > i_size_read(inode))
b8a6176c 3213 iomap->flags |= IOMAP_F_DIRTY;
c8fdfe29
MB
3214
3215 if (map->m_flags & EXT4_MAP_NEW)
3216 iomap->flags |= IOMAP_F_NEW;
3217
de205114
CH
3218 if (flags & IOMAP_DAX)
3219 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3220 else
3221 iomap->bdev = inode->i_sb->s_bdev;
c8fdfe29
MB
3222 iomap->offset = (u64) map->m_lblk << blkbits;
3223 iomap->length = (u64) map->m_len << blkbits;
364443cb 3224
6386722a
RH
3225 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3226 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3227 iomap->flags |= IOMAP_F_MERGED;
3228
c8fdfe29
MB
3229 /*
3230 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3231 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3232 * set. In order for any allocated unwritten extents to be converted
3233 * into written extents correctly within the ->end_io() handler, we
3234 * need to ensure that the iomap->type is set appropriately. Hence we
3235 * need to check whether the EXT4_MAP_UNWRITTEN bit has been
3236 * set first.
3237 */
3238 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3239 iomap->type = IOMAP_UNWRITTEN;
3240 iomap->addr = (u64) map->m_pblk << blkbits;
de205114
CH
3241 if (flags & IOMAP_DAX)
3242 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
c8fdfe29
MB
3243 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3244 iomap->type = IOMAP_MAPPED;
3245 iomap->addr = (u64) map->m_pblk << blkbits;
de205114
CH
3246 if (flags & IOMAP_DAX)
3247 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
364443cb 3248 } else {
c8fdfe29
MB
3249 iomap->type = IOMAP_HOLE;
3250 iomap->addr = IOMAP_NULL_ADDR;
364443cb 3251 }
364443cb
JK
3252}
3253
f063db5e
MB
3254static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3255 unsigned int flags)
776722e8 3256{
776722e8 3257 handle_t *handle;
378f32ba
MB
3258 u8 blkbits = inode->i_blkbits;
3259 int ret, dio_credits, m_flags = 0, retries = 0;
776722e8 3260
776722e8 3261 /*
f063db5e
MB
3262 * Trim the mapping request to the maximum value that we can map at
3263 * once for direct I/O.
776722e8 3264 */
f063db5e
MB
3265 if (map->m_len > DIO_MAX_BLOCKS)
3266 map->m_len = DIO_MAX_BLOCKS;
3267 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
776722e8 3268
f063db5e 3269retry:
776722e8 3270 /*
f063db5e
MB
3271 * Either we allocate blocks and don't get an unwritten extent, in
3272 * which case we have reserved enough credits; or the blocks are
3273 * already allocated and unwritten, in which case the extent
3274 * conversion fits into the credits as well.
776722e8 3275 */
f063db5e
MB
3276 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3277 if (IS_ERR(handle))
3278 return PTR_ERR(handle);
4c0425ff 3279
378f32ba
MB
3280 /*
3281 * DAX and direct I/O are the only two operations that are currently
3282 * supported with IOMAP_WRITE.
3283 */
952da063
CH
3284 WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3285 if (flags & IOMAP_DAX)
378f32ba
MB
3286 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3287 /*
3288 * We use i_size instead of i_disksize here because delalloc writeback
3289 * can complete at any point during the I/O and subsequently push the
3290 * i_disksize out to i_size. This could be beyond where direct I/O is
3291 * happening and thus expose allocated blocks to direct I/O reads.
3292 */
d0b040f5 3293 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
378f32ba
MB
3294 m_flags = EXT4_GET_BLOCKS_CREATE;
3295 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3296 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
4b70df18 3297
378f32ba 3298 ret = ext4_map_blocks(handle, inode, map, m_flags);
8d5d02e6 3299
74c66bcb 3300 /*
378f32ba
MB
3301 * We cannot fill holes in indirect tree based inodes as that could
3302 * expose stale data in the case of a crash. Use the magic error code
3303 * to fallback to buffered I/O.
74c66bcb 3304 */
378f32ba
MB
3305 if (!m_flags && !ret)
3306 ret = -ENOTBLK;
187372a3 3307
f063db5e
MB
3308 ext4_journal_stop(handle);
3309 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3310 goto retry;
3311
3312 return ret;
4c0425ff 3313}
c7064ef1 3314
f063db5e 3315
364443cb 3316static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
c039b997 3317 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
4c0425ff 3318{
364443cb 3319 int ret;
09edf4d3
MB
3320 struct ext4_map_blocks map;
3321 u8 blkbits = inode->i_blkbits;
729f52c6 3322
bcd8e91f
TT
3323 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3324 return -EINVAL;
4bd809db 3325
09edf4d3
MB
3326 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3327 return -ERANGE;
4bd809db 3328
e8340395 3329 /*
09edf4d3 3330 * Calculate the first and last logical blocks respectively.
e8340395 3331 */
09edf4d3
MB
3332 map.m_lblk = offset >> blkbits;
3333 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3334 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
e8340395 3335
9faac62d
RH
3336 if (flags & IOMAP_WRITE) {
3337 /*
3338 * We check here if the blocks are already allocated, then we
3339 * don't need to start a journal txn and we can directly return
3340 * the mapping information. This could boost performance
3341 * especially in multi-threaded overwrite requests.
3342 */
3343 if (offset + length <= i_size_read(inode)) {
3344 ret = ext4_map_blocks(NULL, inode, &map, 0);
3345 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3346 goto out;
3347 }
f063db5e 3348 ret = ext4_iomap_alloc(inode, &map, flags);
9faac62d 3349 } else {
545052e9 3350 ret = ext4_map_blocks(NULL, inode, &map, 0);
9faac62d 3351 }
4bd809db 3352
f063db5e
MB
3353 if (ret < 0)
3354 return ret;
9faac62d 3355out:
38ea50da
EB
3356 /*
3357 * When inline encryption is enabled, sometimes I/O to an encrypted file
3358 * has to be broken up to guarantee DUN contiguity. Handle this by
3359 * limiting the length of the mapping returned.
3360 */
3361 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3362
de205114 3363 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
4bd809db 3364
364443cb
JK
3365 return 0;
3366}
8d5d02e6 3367
8cd115bd
JK
3368static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3369 loff_t length, unsigned flags, struct iomap *iomap,
3370 struct iomap *srcmap)
3371{
3372 int ret;
3373
3374 /*
3375 * Even for writes we don't need to allocate blocks here, so just
3376 * pretend we are reading to save the overhead of starting a transaction.
3377 */
3378 flags &= ~IOMAP_WRITE;
3379 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
fa83c34e 3380 WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
8cd115bd
JK
3381 return ret;
3382}
3383
776722e8
JK
3384static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3385 ssize_t written, unsigned flags, struct iomap *iomap)
3386{
69c499d1 3387 /*
378f32ba
MB
3388 * Check to see whether an error occurred while writing out the data to
3389 * the allocated blocks. If so, return the magic error code so that we
3390 * fall back to buffered I/O and attempt to complete the remainder of
3391 * the I/O. Any blocks that may have been allocated in preparation for
3392 * the direct I/O will be reused during buffered I/O.
69c499d1 3393 */
378f32ba
MB
3394 if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3395 return -ENOTBLK;
69c499d1 3396
569342dc 3397 return 0;
776722e8 3398}
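/*
 * Illustrative sketch of how the -ENOTBLK convention above is consumed
 * (simplified from the direct I/O write path in fs/ext4/file.c; the
 * argument lists are abbreviated, so treat this as pseudocode rather
 * than the exact call sequence):
 *
 *	ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, ...);
 *	if (ret == -ENOTBLK)
 *		ret = ext4_buffered_write_iter(iocb, from);
 */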
4bd809db 3399
8ff6daa1 3400const struct iomap_ops ext4_iomap_ops = {
364443cb 3401 .iomap_begin = ext4_iomap_begin,
776722e8 3402 .iomap_end = ext4_iomap_end,
364443cb 3403};
8d5d02e6 3404
8cd115bd
JK
3405const struct iomap_ops ext4_iomap_overwrite_ops = {
3406 .iomap_begin = ext4_iomap_overwrite_begin,
3407 .iomap_end = ext4_iomap_end,
3408};
3409
09edf4d3
MB
3410static bool ext4_iomap_is_delalloc(struct inode *inode,
3411 struct ext4_map_blocks *map)
3412{
3413 struct extent_status es;
3414 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
914f82a3 3415
09edf4d3
MB
3416 ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3417 map->m_lblk, end, &es);
914f82a3 3418
09edf4d3
MB
3419 if (!es.es_len || es.es_lblk > end)
3420 return false;
914f82a3 3421
09edf4d3
MB
3422 if (es.es_lblk > map->m_lblk) {
3423 map->m_len = es.es_lblk - map->m_lblk;
3424 return false;
914f82a3 3425 }
914f82a3 3426
09edf4d3
MB
3427 offset = map->m_lblk - es.es_lblk;
3428 map->m_len = es.es_len - offset;
914f82a3 3429
09edf4d3 3430 return true;
4c0425ff
MC
3431}
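/*
 * Worked example of the trimming above (illustrative numbers): for a
 * query with map->m_lblk = 10, m_len = 10 and a cached delayed extent
 * covering blocks 15..30, es.es_lblk (15) > m_lblk (10), so m_len is
 * trimmed to 5 and we return false: blocks 10..14 are reported as a
 * hole first.  A follow-up query starting at block 15 finds
 * es.es_lblk == m_lblk, sets m_len to the remaining extent length and
 * returns true, letting the caller report IOMAP_DELALLOC.
 */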
3432
09edf4d3
MB
3433static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3434 loff_t length, unsigned int flags,
3435 struct iomap *iomap, struct iomap *srcmap)
4c0425ff 3436{
09edf4d3
MB
3437 int ret;
3438 bool delalloc = false;
3439 struct ext4_map_blocks map;
3440 u8 blkbits = inode->i_blkbits;
4c0425ff 3441
09edf4d3
MB
3442 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3443 return -EINVAL;
3444
3445 if (ext4_has_inline_data(inode)) {
3446 ret = ext4_inline_data_iomap(inode, iomap);
3447 if (ret != -EAGAIN) {
3448 if (ret == 0 && offset >= iomap->length)
3449 ret = -ENOENT;
3450 return ret;
3451 }
3452 }
2058f83a 3453
84ebd795 3454 /*
09edf4d3 3455 * Calculate the first and last logical block respectively.
84ebd795 3456 */
09edf4d3
MB
3457 map.m_lblk = offset >> blkbits;
3458 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3459 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
84ebd795 3460
b2c57642
RH
3461 /*
3462 * Fiemap callers may call for an offset beyond s_bitmap_maxbytes,
3463 * so handle it here instead of querying ext4_map_blocks(), which
3464 * would warn about the out-of-range offset and return
3465 * an -EIO error.
3466 */
3467 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3468 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3469
3470 if (offset >= sbi->s_bitmap_maxbytes) {
3471 map.m_flags = 0;
3472 goto set_iomap;
3473 }
3474 }
3475
09edf4d3
MB
3476 ret = ext4_map_blocks(NULL, inode, &map, 0);
3477 if (ret < 0)
3478 return ret;
3479 if (ret == 0)
3480 delalloc = ext4_iomap_is_delalloc(inode, &map);
46c7f254 3481
b2c57642 3482set_iomap:
de205114 3483 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
09edf4d3
MB
3484 if (delalloc && iomap->type == IOMAP_HOLE)
3485 iomap->type = IOMAP_DELALLOC;
3486
3487 return 0;
4c0425ff
MC
3488}
3489
09edf4d3
MB
3490const struct iomap_ops ext4_iomap_report_ops = {
3491 .iomap_begin = ext4_iomap_begin_report,
3492};
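/*
 * Illustrative userspace sketch (not part of this file): these report
 * ops back lseek(SEEK_DATA)/lseek(SEEK_HOLE) on ext4, which is one way
 * delalloc extents become visible as data before writeback.
 */
#if 0	/* userspace example */
#define _GNU_SOURCE
#include <unistd.h>

static void walk_extents(int fd)
{
	off_t pos = 0, hole;

	while ((pos = lseek(fd, pos, SEEK_DATA)) >= 0) {
		hole = lseek(fd, pos, SEEK_HOLE);
		/* [pos, hole) is data (possibly still delalloc) */
		pos = hole;
	}
}
#endif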
3493
ac27a0ec 3494/*
3f5d3063
JK
3495 * For data=journal mode, a folio should be marked dirty only when it was
3496 * writeably mapped. When that happens, it was already attached to the
3497 * transaction and marked as jbddirty (we take care of this in
3498 * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3499 * so we should have nothing to do here, except for the case when someone
3500 * had the page pinned and dirtied the page through this pin (e.g. by doing
3501 * direct IO to it). In that case we'd need to attach buffers here to the
3502 * transaction but we cannot due to lock ordering. We cannot just dirty the
3503 * folio and leave attached buffers clean, because the buffers' dirty state is
3504 * "definitive". We cannot just set the buffers dirty or jbddirty because all
3505 * the journalling code will explode. So what we do is to mark the folio
3506 * "pending dirty" and next time ext4_writepages() is called, attach buffers
3507 * to the transaction appropriately.
ac27a0ec 3508 */
187c82cb
MWO
3509static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3510 struct folio *folio)
ac27a0ec 3511{
0f252336 3512 WARN_ON_ONCE(!folio_buffers(folio));
3f5d3063
JK
3513 if (folio_maybe_dma_pinned(folio))
3514 folio_set_checked(folio);
187c82cb 3515 return filemap_dirty_folio(mapping, folio);
ac27a0ec
DK
3516}
3517
e621900a 3518static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
6dcc693b 3519{
e621900a
MWO
3520 WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3521 WARN_ON_ONCE(!folio_buffers(folio));
3522 return block_dirty_folio(mapping, folio);
6dcc693b
JK
3523}
3524
0e6895ba
RH
3525static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3526 struct file *file, sector_t *span)
3527{
3528 return iomap_swapfile_activate(sis, file, span,
3529 &ext4_iomap_report_ops);
3530}
3531
74d553aa 3532static const struct address_space_operations ext4_aops = {
fe5ddf6b 3533 .read_folio = ext4_read_folio,
6311f91f 3534 .readahead = ext4_readahead,
20970ba6 3535 .writepages = ext4_writepages,
8ab22b9a 3536 .write_begin = ext4_write_begin,
74d553aa 3537 .write_end = ext4_write_end,
e621900a 3538 .dirty_folio = ext4_dirty_folio,
8ab22b9a 3539 .bmap = ext4_bmap,
7ba13abb 3540 .invalidate_folio = ext4_invalidate_folio,
3c402f15 3541 .release_folio = ext4_release_folio,
378f32ba 3542 .direct_IO = noop_direct_IO,
67235182 3543 .migrate_folio = buffer_migrate_folio,
8ab22b9a 3544 .is_partially_uptodate = block_is_partially_uptodate,
aa261f54 3545 .error_remove_page = generic_error_remove_page,
0e6895ba 3546 .swap_activate = ext4_iomap_swap_activate,
ac27a0ec
DK
3547};
3548
617ba13b 3549static const struct address_space_operations ext4_journalled_aops = {
fe5ddf6b 3550 .read_folio = ext4_read_folio,
6311f91f 3551 .readahead = ext4_readahead,
20970ba6 3552 .writepages = ext4_writepages,
8ab22b9a
HH
3553 .write_begin = ext4_write_begin,
3554 .write_end = ext4_journalled_write_end,
187c82cb 3555 .dirty_folio = ext4_journalled_dirty_folio,
8ab22b9a 3556 .bmap = ext4_bmap,
ccd16945 3557 .invalidate_folio = ext4_journalled_invalidate_folio,
3c402f15 3558 .release_folio = ext4_release_folio,
378f32ba 3559 .direct_IO = noop_direct_IO,
dae99960 3560 .migrate_folio = buffer_migrate_folio_norefs,
8ab22b9a 3561 .is_partially_uptodate = block_is_partially_uptodate,
aa261f54 3562 .error_remove_page = generic_error_remove_page,
0e6895ba 3563 .swap_activate = ext4_iomap_swap_activate,
ac27a0ec
DK
3564};
3565
64769240 3566static const struct address_space_operations ext4_da_aops = {
fe5ddf6b 3567 .read_folio = ext4_read_folio,
6311f91f 3568 .readahead = ext4_readahead,
20970ba6 3569 .writepages = ext4_writepages,
8ab22b9a
HH
3570 .write_begin = ext4_da_write_begin,
3571 .write_end = ext4_da_write_end,
e621900a 3572 .dirty_folio = ext4_dirty_folio,
8ab22b9a 3573 .bmap = ext4_bmap,
7ba13abb 3574 .invalidate_folio = ext4_invalidate_folio,
3c402f15 3575 .release_folio = ext4_release_folio,
378f32ba 3576 .direct_IO = noop_direct_IO,
67235182 3577 .migrate_folio = buffer_migrate_folio,
8ab22b9a 3578 .is_partially_uptodate = block_is_partially_uptodate,
aa261f54 3579 .error_remove_page = generic_error_remove_page,
0e6895ba 3580 .swap_activate = ext4_iomap_swap_activate,
64769240
AT
3581};
3582
5f0663bb
DW
3583static const struct address_space_operations ext4_dax_aops = {
3584 .writepages = ext4_dax_writepages,
3585 .direct_IO = noop_direct_IO,
46de8b97 3586 .dirty_folio = noop_dirty_folio,
94dbb631 3587 .bmap = ext4_bmap,
0e6895ba 3588 .swap_activate = ext4_iomap_swap_activate,
5f0663bb
DW
3589};
3590
617ba13b 3591void ext4_set_aops(struct inode *inode)
ac27a0ec 3592{
3d2b1582
LC
3593 switch (ext4_inode_journal_mode(inode)) {
3594 case EXT4_INODE_ORDERED_DATA_MODE:
3d2b1582 3595 case EXT4_INODE_WRITEBACK_DATA_MODE:
3d2b1582
LC
3596 break;
3597 case EXT4_INODE_JOURNAL_DATA_MODE:
617ba13b 3598 inode->i_mapping->a_ops = &ext4_journalled_aops;
74d553aa 3599 return;
3d2b1582
LC
3600 default:
3601 BUG();
3602 }
5f0663bb
DW
3603 if (IS_DAX(inode))
3604 inode->i_mapping->a_ops = &ext4_dax_aops;
3605 else if (test_opt(inode->i_sb, DELALLOC))
74d553aa
TT
3606 inode->i_mapping->a_ops = &ext4_da_aops;
3607 else
3608 inode->i_mapping->a_ops = &ext4_aops;
ac27a0ec
DK
3609}
3610
923ae0ff 3611static int __ext4_block_zero_page_range(handle_t *handle,
d863dc36
LC
3612 struct address_space *mapping, loff_t from, loff_t length)
3613{
09cbfeaf
KS
3614 ext4_fsblk_t index = from >> PAGE_SHIFT;
3615 unsigned offset = from & (PAGE_SIZE-1);
923ae0ff 3616 unsigned blocksize, pos;
d863dc36
LC
3617 ext4_lblk_t iblock;
3618 struct inode *inode = mapping->host;
3619 struct buffer_head *bh;
9d3973de 3620 struct folio *folio;
d863dc36
LC
3621 int err = 0;
3622
9d3973de
MW
3623 folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3624 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3625 mapping_gfp_constraint(mapping, ~__GFP_FS));
7fa8a8ee
LT
3626 if (IS_ERR(folio))
3627 return PTR_ERR(folio);
d863dc36
LC
3628
3629 blocksize = inode->i_sb->s_blocksize;
d863dc36 3630
09cbfeaf 3631 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
d863dc36 3632
9d3973de
MW
3633 bh = folio_buffers(folio);
3634 if (!bh) {
3635 create_empty_buffers(&folio->page, blocksize, 0);
3636 bh = folio_buffers(folio);
3637 }
d863dc36
LC
3638
3639 /* Find the buffer that contains "offset" */
d863dc36
LC
3640 pos = blocksize;
3641 while (offset >= pos) {
3642 bh = bh->b_this_page;
3643 iblock++;
3644 pos += blocksize;
3645 }
d863dc36
LC
3646 if (buffer_freed(bh)) {
3647 BUFFER_TRACE(bh, "freed: skip");
3648 goto unlock;
3649 }
d863dc36
LC
3650 if (!buffer_mapped(bh)) {
3651 BUFFER_TRACE(bh, "unmapped");
3652 ext4_get_block(inode, iblock, bh, 0);
3653 /* unmapped? It's a hole - nothing to do */
3654 if (!buffer_mapped(bh)) {
3655 BUFFER_TRACE(bh, "still unmapped");
3656 goto unlock;
3657 }
3658 }
3659
3660 /* Ok, it's mapped. Make sure it's up-to-date */
9d3973de 3661 if (folio_test_uptodate(folio))
d863dc36
LC
3662 set_buffer_uptodate(bh);
3663
3664 if (!buffer_uptodate(bh)) {
2d069c08 3665 err = ext4_read_bh_lock(bh, 0, true);
3666 if (err)
d863dc36 3667 goto unlock;
4f74d15f 3668 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
c9c7429c 3669 /* We expect the key to be set. */
a7550b30 3670 BUG_ON(!fscrypt_has_encryption_key(inode));
9d3973de 3671 err = fscrypt_decrypt_pagecache_blocks(folio,
51e4e315 3672 blocksize,
834f1565
EB
3673 bh_offset(bh));
3674 if (err) {
3675 clear_buffer_uptodate(bh);
3676 goto unlock;
3677 }
c9c7429c 3678 }
d863dc36 3679 }
d863dc36
LC
3680 if (ext4_should_journal_data(inode)) {
3681 BUFFER_TRACE(bh, "get write access");
188c299e
JK
3682 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3683 EXT4_JTR_NONE);
d863dc36
LC
3684 if (err)
3685 goto unlock;
3686 }
9d3973de 3687 folio_zero_range(folio, offset, length);
d863dc36
LC
3688 BUFFER_TRACE(bh, "zeroed end of block");
3689
d863dc36 3690 if (ext4_should_journal_data(inode)) {
d84c9ebd 3691 err = ext4_dirty_journalled_data(handle, bh);
0713ed0c 3692 } else {
353eefd3 3693 err = 0;
d863dc36 3694 mark_buffer_dirty(bh);
3957ef53 3695 if (ext4_should_order_data(inode))
73131fbb
RZ
3696 err = ext4_jbd2_inode_add_write(handle, inode, from,
3697 length);
0713ed0c 3698 }
d863dc36
LC
3699
3700unlock:
9d3973de
MW
3701 folio_unlock(folio);
3702 folio_put(folio);
d863dc36
LC
3703 return err;
3704}
3705
923ae0ff
RZ
3706/*
3707 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3708 * starting from file offset 'from'. The range to be zeroed must
3709 * be contained within one block. If the specified range exceeds
3710 * the end of the block it will be shortened to the end of the block
3088e5a5 3711 * that corresponds to 'from'.
923ae0ff
RZ
3712 */
3713static int ext4_block_zero_page_range(handle_t *handle,
3714 struct address_space *mapping, loff_t from, loff_t length)
3715{
3716 struct inode *inode = mapping->host;
09cbfeaf 3717 unsigned offset = from & (PAGE_SIZE-1);
923ae0ff
RZ
3718 unsigned blocksize = inode->i_sb->s_blocksize;
3719 unsigned max = blocksize - (offset & (blocksize - 1));
3720
3721 /*
3722 * Correct the length if it does not fall entirely between
3723 * 'from' and the end of the block.
3724 */
3725 if (length > max || length < 0)
3726 length = max;
3727
47e69351 3728 if (IS_DAX(inode)) {
c6f40468
CH
3729 return dax_zero_range(inode, from, length, NULL,
3730 &ext4_iomap_ops);
47e69351 3731 }
923ae0ff
RZ
3732 return __ext4_block_zero_page_range(handle, mapping, from, length);
3733}
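/*
 * Worked example of the clamping above (illustrative numbers): with a
 * 4096-byte block size, from = 5000 gives offset = 904 and
 * max = 4096 - 904 = 3192, so any length larger than 3192 is clamped
 * to 3192 and zeroing stops exactly at the block boundary (byte 8192).
 */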
3734
94350ab5
MW
3735/*
3736 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3737 * up to the end of the block which corresponds to `from'.
3738 * This is required during truncate. We need to physically zero the tail end
3739 * of that block so it doesn't yield old data if the file is later grown.
3740 */
c197855e 3741static int ext4_block_truncate_page(handle_t *handle,
94350ab5
MW
3742 struct address_space *mapping, loff_t from)
3743{
09cbfeaf 3744 unsigned offset = from & (PAGE_SIZE-1);
94350ab5
MW
3745 unsigned length;
3746 unsigned blocksize;
3747 struct inode *inode = mapping->host;
3748
0d06863f 3749 /* If we are processing an encrypted inode during orphan list handling */
592ddec7 3750 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
0d06863f
TT
3751 return 0;
3752
94350ab5
MW
3753 blocksize = inode->i_sb->s_blocksize;
3754 length = blocksize - (offset & (blocksize - 1));
3755
3756 return ext4_block_zero_page_range(handle, mapping, from, length);
3757}
3758
a87dd18c
LC
3759int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3760 loff_t lstart, loff_t length)
3761{
3762 struct super_block *sb = inode->i_sb;
3763 struct address_space *mapping = inode->i_mapping;
e1be3a92 3764 unsigned partial_start, partial_end;
a87dd18c
LC
3765 ext4_fsblk_t start, end;
3766 loff_t byte_end = (lstart + length - 1);
3767 int err = 0;
3768
e1be3a92
LC
3769 partial_start = lstart & (sb->s_blocksize - 1);
3770 partial_end = byte_end & (sb->s_blocksize - 1);
3771
a87dd18c
LC
3772 start = lstart >> sb->s_blocksize_bits;
3773 end = byte_end >> sb->s_blocksize_bits;
3774
3775 /* Handle partial zero within the single block */
e1be3a92
LC
3776 if (start == end &&
3777 (partial_start || (partial_end != sb->s_blocksize - 1))) {
a87dd18c
LC
3778 err = ext4_block_zero_page_range(handle, mapping,
3779 lstart, length);
3780 return err;
3781 }
3782 /* Handle partial zero out on the start of the range */
e1be3a92 3783 if (partial_start) {
a87dd18c
LC
3784 err = ext4_block_zero_page_range(handle, mapping,
3785 lstart, sb->s_blocksize);
3786 if (err)
3787 return err;
3788 }
3789 /* Handle partial zero out on the end of the range */
e1be3a92 3790 if (partial_end != sb->s_blocksize - 1)
a87dd18c 3791 err = ext4_block_zero_page_range(handle, mapping,
e1be3a92
LC
3792 byte_end - partial_end,
3793 partial_end + 1);
a87dd18c
LC
3794 return err;
3795}
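/*
 * Worked example (illustrative numbers, 4096-byte blocks): zeroing
 * lstart = 1000, length = 8000 gives byte_end = 8999, start = 0,
 * end = 2, partial_start = 1000 and partial_end = 807.  The range
 * spans several blocks, so the head case zeroes bytes 1000..4095 of
 * the first block and the tail case zeroes bytes 8192..8999 of the
 * last one; the whole blocks in between are left to the block-removal
 * code in the caller.
 */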
3796
91ef4caf
DG
3797int ext4_can_truncate(struct inode *inode)
3798{
91ef4caf
DG
3799 if (S_ISREG(inode->i_mode))
3800 return 1;
3801 if (S_ISDIR(inode->i_mode))
3802 return 1;
3803 if (S_ISLNK(inode->i_mode))
3804 return !ext4_inode_is_fast_symlink(inode);
3805 return 0;
3806}
3807
01127848
JK
3808/*
3809 * We have to make sure i_disksize gets properly updated before we truncate
3810 * page cache due to hole punching or zero range. Otherwise the i_disksize
3811 * update can get lost, as it may have been postponed until writeback
3812 * submission, which will never happen once we truncate the page cache.
3813 */
3814int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3815 loff_t len)
3816{
3817 handle_t *handle;
4209ae12
HS
3818 int ret;
3819
01127848
JK
3820 loff_t size = i_size_read(inode);
3821
5955102c 3822 WARN_ON(!inode_is_locked(inode));
01127848
JK
3823 if (offset > size || offset + len < size)
3824 return 0;
3825
3826 if (EXT4_I(inode)->i_disksize >= size)
3827 return 0;
3828
3829 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3830 if (IS_ERR(handle))
3831 return PTR_ERR(handle);
3832 ext4_update_i_disksize(inode, size);
4209ae12 3833 ret = ext4_mark_inode_dirty(handle, inode);
01127848
JK
3834 ext4_journal_stop(handle);
3835
4209ae12 3836 return ret;
01127848
JK
3837}
3838
d4f5258e 3839static void ext4_wait_dax_page(struct inode *inode)
430657b6 3840{
d4f5258e 3841 filemap_invalidate_unlock(inode->i_mapping);
430657b6 3842 schedule();
d4f5258e 3843 filemap_invalidate_lock(inode->i_mapping);
430657b6
RZ
3844}
3845
3846int ext4_break_layouts(struct inode *inode)
3847{
430657b6 3848 struct page *page;
430657b6
RZ
3849 int error;
3850
d4f5258e 3851 if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
430657b6
RZ
3852 return -EINVAL;
3853
3854 do {
430657b6
RZ
3855 page = dax_layout_busy_page(inode->i_mapping);
3856 if (!page)
3857 return 0;
3858
3859 error = ___wait_var_event(&page->_refcount,
3860 atomic_read(&page->_refcount) == 1,
3861 TASK_INTERRUPTIBLE, 0, 0,
d4f5258e 3862 ext4_wait_dax_page(inode));
b1f38217 3863 } while (error == 0);
430657b6
RZ
3864
3865 return error;
3866}
3867
a4bb6b64 3868/*
cca32b7e 3869 * ext4_punch_hole: punches a hole in a file by releasing the blocks
a4bb6b64
AH
3870 * associated with the given offset and length
3871 *
3872 * @inode: File inode
3873 * @offset: The offset where the hole will begin
3874 * @len: The length of the hole
3875 *
4907cb7b 3876 * Returns: 0 on success or negative on failure
a4bb6b64
AH
3877 */
3878
ad5cd4f4 3879int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
a4bb6b64 3880{
ad5cd4f4 3881 struct inode *inode = file_inode(file);
26a4c0c6
TT
3882 struct super_block *sb = inode->i_sb;
3883 ext4_lblk_t first_block, stop_block;
3884 struct address_space *mapping = inode->i_mapping;
2da37622
TS
3885 loff_t first_block_offset, last_block_offset, max_length;
3886 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
26a4c0c6
TT
3887 handle_t *handle;
3888 unsigned int credits;
4209ae12 3889 int ret = 0, ret2 = 0;
26a4c0c6 3890
b8a86845 3891 trace_ext4_punch_hole(inode, offset, length, 0);
aaddea81 3892
26a4c0c6
TT
3893 /*
3894 * Write out all dirty pages to avoid race conditions,
3895 * then release them.
3896 */
cca32b7e 3897 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
26a4c0c6
TT
3898 ret = filemap_write_and_wait_range(mapping, offset,
3899 offset + length - 1);
3900 if (ret)
3901 return ret;
3902 }
3903
5955102c 3904 inode_lock(inode);
9ef06cec 3905
26a4c0c6
TT
3906 /* No need to punch hole beyond i_size */
3907 if (offset >= inode->i_size)
3908 goto out_mutex;
3909
3910 /*
3911 * If the hole extends beyond i_size, set the hole
3912 * to end after the page that contains i_size
3913 */
3914 if (offset + length > inode->i_size) {
3915 length = inode->i_size +
09cbfeaf 3916 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
26a4c0c6
TT
3917 offset;
3918 }
3919
2da37622
TS
3920 /*
3921 * For punch hole, offset + length must stay at least one block short
3922 * of s_bitmap_maxbytes. Adjust the length if it goes beyond that limit.
3923 */
3924 max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3925 if (offset + length > max_length)
3926 length = max_length - offset;
3927
a361293f
JK
3928 if (offset & (sb->s_blocksize - 1) ||
3929 (offset + length) & (sb->s_blocksize - 1)) {
3930 /*
3931 * Attach jinode to inode for jbd2 if we do any zeroing of
3932 * partial block
3933 */
3934 ret = ext4_inode_attach_jinode(inode);
3935 if (ret < 0)
3936 goto out_mutex;
3937
3938 }
3939
f340b3d9 3940 /* Wait for all existing dio workers; newcomers will block on i_rwsem */
ea3d7209
JK
3941 inode_dio_wait(inode);
3942
ad5cd4f4
DW
3943 ret = file_modified(file);
3944 if (ret)
3945 goto out_mutex;
3946
ea3d7209
JK
3947 /*
3948 * Prevent page faults from reinstantiating pages we have released from
3949 * page cache.
3950 */
d4f5258e 3951 filemap_invalidate_lock(mapping);
430657b6
RZ
3952
3953 ret = ext4_break_layouts(inode);
3954 if (ret)
3955 goto out_dio;
3956
a87dd18c
LC
3957 first_block_offset = round_up(offset, sb->s_blocksize);
3958 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
26a4c0c6 3959
a87dd18c 3960 /* Now release the pages and zero the block-aligned parts of pages */
01127848
JK
3961 if (last_block_offset > first_block_offset) {
3962 ret = ext4_update_disksize_before_punch(inode, offset, length);
3963 if (ret)
3964 goto out_dio;
a87dd18c
LC
3965 truncate_pagecache_range(inode, first_block_offset,
3966 last_block_offset);
01127848 3967 }
26a4c0c6
TT
3968
3969 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3970 credits = ext4_writepage_trans_blocks(inode);
3971 else
3972 credits = ext4_blocks_for_truncate(inode);
3973 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3974 if (IS_ERR(handle)) {
3975 ret = PTR_ERR(handle);
3976 ext4_std_error(sb, ret);
3977 goto out_dio;
3978 }
3979
a87dd18c
LC
3980 ret = ext4_zero_partial_blocks(handle, inode, offset,
3981 length);
3982 if (ret)
3983 goto out_stop;
26a4c0c6
TT
3984
3985 first_block = (offset + sb->s_blocksize - 1) >>
3986 EXT4_BLOCK_SIZE_BITS(sb);
3987 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
3988
eee597ac
LC
3989 /* If there are blocks to remove, do it */
3990 if (stop_block > first_block) {
26a4c0c6 3991
eee597ac 3992 down_write(&EXT4_I(inode)->i_data_sem);
27bc446e 3993 ext4_discard_preallocations(inode, 0);
26a4c0c6 3994
eee597ac
LC
3995 ret = ext4_es_remove_extent(inode, first_block,
3996 stop_block - first_block);
3997 if (ret) {
3998 up_write(&EXT4_I(inode)->i_data_sem);
3999 goto out_stop;
4000 }
26a4c0c6 4001
eee597ac
LC
4002 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4003 ret = ext4_ext_remove_space(inode, first_block,
4004 stop_block - 1);
4005 else
4006 ret = ext4_ind_remove_space(handle, inode, first_block,
4007 stop_block);
26a4c0c6 4008
eee597ac
LC
4009 up_write(&EXT4_I(inode)->i_data_sem);
4010 }
a80f7fcf 4011 ext4_fc_track_range(handle, inode, first_block, stop_block);
26a4c0c6
TT
4012 if (IS_SYNC(inode))
4013 ext4_handle_sync(handle);
e251f9bc 4014
eeca7ea1 4015 inode->i_mtime = inode->i_ctime = current_time(inode);
4209ae12
HS
4016 ret2 = ext4_mark_inode_dirty(handle, inode);
4017 if (unlikely(ret2))
4018 ret = ret2;
67a7d5f5
JK
4019 if (ret >= 0)
4020 ext4_update_inode_fsync_trans(handle, inode, 1);
26a4c0c6
TT
4021out_stop:
4022 ext4_journal_stop(handle);
4023out_dio:
d4f5258e 4024 filemap_invalidate_unlock(mapping);
26a4c0c6 4025out_mutex:
5955102c 4026 inode_unlock(inode);
26a4c0c6 4027 return ret;
a4bb6b64
AH
4028}
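/*
 * Illustrative userspace sketch (not part of this file): the usual way
 * into ext4_punch_hole() is fallocate(2) with FALLOC_FL_PUNCH_HOLE,
 * which must be combined with FALLOC_FL_KEEP_SIZE.
 */
#if 0	/* userspace example */
#define _GNU_SOURCE
#include <fcntl.h>

static int punch(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
#endif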
4029
a361293f
JK
4030int ext4_inode_attach_jinode(struct inode *inode)
4031{
4032 struct ext4_inode_info *ei = EXT4_I(inode);
4033 struct jbd2_inode *jinode;
4034
4035 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4036 return 0;
4037
4038 jinode = jbd2_alloc_inode(GFP_KERNEL);
4039 spin_lock(&inode->i_lock);
4040 if (!ei->jinode) {
4041 if (!jinode) {
4042 spin_unlock(&inode->i_lock);
4043 return -ENOMEM;
4044 }
4045 ei->jinode = jinode;
4046 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4047 jinode = NULL;
4048 }
4049 spin_unlock(&inode->i_lock);
4050 if (unlikely(jinode != NULL))
4051 jbd2_free_inode(jinode);
4052 return 0;
4053}
4054
ac27a0ec 4055/*
617ba13b 4056 * ext4_truncate()
ac27a0ec 4057 *
617ba13b
MC
4058 * We block out ext4_get_block() block instantiations across the entire
4059 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
ac27a0ec
DK
4060 * simultaneously on behalf of the same inode.
4061 *
42b2aa86 4062 * As we work through the truncate and commit bits of it to the journal there
ac27a0ec
DK
4063 * is one core, guiding principle: the file's tree must always be consistent on
4064 * disk. We must be able to restart the truncate after a crash.
4065 *
4066 * The file's tree may be transiently inconsistent in memory (although it
4067 * probably isn't), but whenever we close off and commit a journal transaction,
4068 * the contents of (the filesystem + the journal) must be consistent and
4069 * restartable. It's pretty simple, really: bottom up, right to left (although
4070 * left-to-right works OK too).
4071 *
4072 * Note that at recovery time, journal replay occurs *before* the restart of
4073 * truncate against the orphan inode list.
4074 *
4075 * The committed inode has the new, desired i_size (which is the same as
617ba13b 4076 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
ac27a0ec 4077 * that this inode's truncate did not complete and it will again call
617ba13b
MC
4078 * ext4_truncate() to have another go. So there will be instantiated blocks
4079 * to the right of the truncation point in a crashed ext4 filesystem. But
ac27a0ec 4080 * that's fine - as long as they are linked from the inode, the post-crash
617ba13b 4081 * ext4_truncate() run will find them and release them.
ac27a0ec 4082 */
2c98eb5e 4083int ext4_truncate(struct inode *inode)
ac27a0ec 4084{
819c4920
TT
4085 struct ext4_inode_info *ei = EXT4_I(inode);
4086 unsigned int credits;
4209ae12 4087 int err = 0, err2;
819c4920
TT
4088 handle_t *handle;
4089 struct address_space *mapping = inode->i_mapping;
819c4920 4090
19b5ef61
TT
4091 /*
4092 * There is a possibility that we're either freeing the inode
e04027e8 4093 * or it's a completely new inode. In those cases we might not
f340b3d9 4094 * have i_rwsem locked because it's not necessary.
19b5ef61
TT
4095 */
4096 if (!(inode->i_state & (I_NEW|I_FREEING)))
5955102c 4097 WARN_ON(!inode_is_locked(inode));
0562e0ba
JZ
4098 trace_ext4_truncate_enter(inode);
4099
91ef4caf 4100 if (!ext4_can_truncate(inode))
9a5d265f 4101 goto out_trace;
ac27a0ec 4102
5534fb5b 4103 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
19f5fb7a 4104 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
7d8f9f7d 4105
aef1c851
TM
4106 if (ext4_has_inline_data(inode)) {
4107 int has_inline = 1;
4108
01daf945 4109 err = ext4_inline_data_truncate(inode, &has_inline);
9a5d265f 4110 if (err || has_inline)
4111 goto out_trace;
aef1c851
TM
4112 }
4113
a361293f
JK
4114 /* If we zero-out tail of the page, we have to create jinode for jbd2 */
4115 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
a71248b1
BL
4116 err = ext4_inode_attach_jinode(inode);
4117 if (err)
9a5d265f 4118 goto out_trace;
a361293f
JK
4119 }
4120
819c4920
TT
4121 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4122 credits = ext4_writepage_trans_blocks(inode);
4123 else
4124 credits = ext4_blocks_for_truncate(inode);
4125
4126 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
9a5d265f 4127 if (IS_ERR(handle)) {
4128 err = PTR_ERR(handle);
4129 goto out_trace;
4130 }
819c4920 4131
eb3544c6
LC
4132 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4133 ext4_block_truncate_page(handle, mapping, inode->i_size);
819c4920
TT
4134
4135 /*
4136 * We add the inode to the orphan list, so that if this
4137 * truncate spans multiple transactions, and we crash, we will
4138 * resume the truncate when the filesystem recovers. It also
4139 * marks the inode dirty, to catch the new size.
4140 *
4141 * Implication: the file must always be in a sane, consistent
4142 * truncatable state while each transaction commits.
4143 */
2c98eb5e
TT
4144 err = ext4_orphan_add(handle, inode);
4145 if (err)
819c4920
TT
4146 goto out_stop;
4147
4148 down_write(&EXT4_I(inode)->i_data_sem);
4149
27bc446e 4150 ext4_discard_preallocations(inode, 0);
819c4920 4151
ff9893dc 4152 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
d0abb36d 4153 err = ext4_ext_truncate(handle, inode);
ff9893dc 4154 else
819c4920
TT
4155 ext4_ind_truncate(handle, inode);
4156
4157 up_write(&ei->i_data_sem);
d0abb36d
TT
4158 if (err)
4159 goto out_stop;
819c4920
TT
4160
4161 if (IS_SYNC(inode))
4162 ext4_handle_sync(handle);
4163
4164out_stop:
4165 /*
4166 * If this was a simple ftruncate() and the file will remain alive,
4167 * then we need to clear up the orphan record which we created above.
4168 * However, if this was a real unlink then we were called by
58d86a50 4169 * ext4_evict_inode(), and we allow that function to clean up the
819c4920
TT
4170 * orphan info for us.
4171 */
4172 if (inode->i_nlink)
4173 ext4_orphan_del(handle, inode);
4174
eeca7ea1 4175 inode->i_mtime = inode->i_ctime = current_time(inode);
4209ae12
HS
4176 err2 = ext4_mark_inode_dirty(handle, inode);
4177 if (unlikely(err2 && !err))
4178 err = err2;
819c4920 4179 ext4_journal_stop(handle);
ac27a0ec 4180
9a5d265f 4181out_trace:
0562e0ba 4182 trace_ext4_truncate_exit(inode);
2c98eb5e 4183 return err;
ac27a0ec
DK
4184}
4185
9a1bf32c
ZY
4186static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4187{
4188 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4189 return inode_peek_iversion_raw(inode);
4190 else
4191 return inode_peek_iversion(inode);
4192}
4193
4194static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4195 struct ext4_inode_info *ei)
4196{
4197 struct inode *inode = &(ei->vfs_inode);
4198 u64 i_blocks = READ_ONCE(inode->i_blocks);
4199 struct super_block *sb = inode->i_sb;
4200
4201 if (i_blocks <= ~0U) {
4202 /*
4203 * i_blocks can be represented in a 32 bit variable
4204 * as a multiple of 512 bytes
4205 */
4206 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4207 raw_inode->i_blocks_high = 0;
4208 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4209 return 0;
4210 }
4211
4212 /*
4213 * This should never happen since sb->s_maxbytes should not have
4214 * allowed this; sb->s_maxbytes was set according to the huge_file
4215 * feature in ext4_fill_super().
4216 */
4217 if (!ext4_has_feature_huge_file(sb))
4218 return -EFSCORRUPTED;
4219
4220 if (i_blocks <= 0xffffffffffffULL) {
4221 /*
4222 * i_blocks can be represented in a 48 bit variable
4223 * as a multiple of 512 bytes
4224 */
4225 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4226 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4227 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4228 } else {
4229 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4230 /* i_blocks is stored in filesystem-block units */
4231 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4232 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4233 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4234 }
4235 return 0;
4236}
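/*
 * Worked example for the encoding above (illustrative numbers):
 * i_blocks counts 512-byte units, so a 16 TiB file has
 * i_blocks = 2^35, which overflows 32 bits but fits in the 48-bit
 * lo/high pair, and HUGE_FILE stays clear.  Only when even 48 bits
 * would overflow is EXT4_INODE_HUGE_FILE set and the count stored in
 * filesystem-block units instead.
 */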
4237
4238static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4239{
4240 struct ext4_inode_info *ei = EXT4_I(inode);
4241 uid_t i_uid;
4242 gid_t i_gid;
4243 projid_t i_projid;
4244 int block;
4245 int err;
4246
4247 err = ext4_inode_blocks_set(raw_inode, ei);
4248
4249 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4250 i_uid = i_uid_read(inode);
4251 i_gid = i_gid_read(inode);
4252 i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4253 if (!(test_opt(inode->i_sb, NO_UID32))) {
4254 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4255 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4256 /*
4257 * Fix up interoperability with old kernels. Otherwise,
4258 * old inodes get re-used with the upper 16 bits of the
4259 * uid/gid intact.
4260 */
4261 if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4262 raw_inode->i_uid_high = 0;
4263 raw_inode->i_gid_high = 0;
4264 } else {
4265 raw_inode->i_uid_high =
4266 cpu_to_le16(high_16_bits(i_uid));
4267 raw_inode->i_gid_high =
4268 cpu_to_le16(high_16_bits(i_gid));
4269 }
4270 } else {
4271 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4272 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4273 raw_inode->i_uid_high = 0;
4274 raw_inode->i_gid_high = 0;
4275 }
4276 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4277
4278 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4279 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4280 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4281 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4282
4283 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4284 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4285 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4286 raw_inode->i_file_acl_high =
4287 cpu_to_le16(ei->i_file_acl >> 32);
4288 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4289 ext4_isize_set(raw_inode, ei->i_disksize);
4290
4291 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4292 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4293 if (old_valid_dev(inode->i_rdev)) {
4294 raw_inode->i_block[0] =
4295 cpu_to_le32(old_encode_dev(inode->i_rdev));
4296 raw_inode->i_block[1] = 0;
4297 } else {
4298 raw_inode->i_block[0] = 0;
4299 raw_inode->i_block[1] =
4300 cpu_to_le32(new_encode_dev(inode->i_rdev));
4301 raw_inode->i_block[2] = 0;
4302 }
4303 } else if (!ext4_has_inline_data(inode)) {
4304 for (block = 0; block < EXT4_N_BLOCKS; block++)
4305 raw_inode->i_block[block] = ei->i_data[block];
4306 }
4307
4308 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4309 u64 ivers = ext4_inode_peek_iversion(inode);
4310
4311 raw_inode->i_disk_version = cpu_to_le32(ivers);
4312 if (ei->i_extra_isize) {
4313 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4314 raw_inode->i_version_hi =
4315 cpu_to_le32(ivers >> 32);
4316 raw_inode->i_extra_isize =
4317 cpu_to_le16(ei->i_extra_isize);
4318 }
4319 }
4320
4321 if (i_projid != EXT4_DEF_PROJID &&
4322 !ext4_has_feature_project(inode->i_sb))
4323 err = err ?: -EFSCORRUPTED;
4324
4325 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4326 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4327 raw_inode->i_projid = cpu_to_le32(i_projid);
4328
4329 ext4_inode_csum_set(inode, raw_inode, ei);
4330 return err;
4331}
4332
ac27a0ec 4333/*
617ba13b 4334 * ext4_get_inode_loc returns with an extra refcount against the inode's
de01f484
ZY
4335 * underlying buffer_head on success. If we pass 'inode' and it does not
4336 * have in-inode xattr, we have all inode data in memory that is needed
4337 * to recreate the on-disk version of this inode.
ac27a0ec 4338 */
8016e29f 4339static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
de01f484 4340 struct inode *inode, struct ext4_iloc *iloc,
8016e29f 4341 ext4_fsblk_t *ret_block)
ac27a0ec 4342{
240799cd
TT
4343 struct ext4_group_desc *gdp;
4344 struct buffer_head *bh;
240799cd 4345 ext4_fsblk_t block;
02f03c42 4346 struct blk_plug plug;
240799cd
TT
4347 int inodes_per_block, inode_offset;
4348
3a06d778 4349 iloc->bh = NULL;
8016e29f
HS
4350 if (ino < EXT4_ROOT_INO ||
4351 ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
6a797d27 4352 return -EFSCORRUPTED;
ac27a0ec 4353
8016e29f 4354 iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
240799cd
TT
4355 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4356 if (!gdp)
ac27a0ec
DK
4357 return -EIO;
4358
240799cd
TT
4359 /*
4360 * Figure out the offset within the block group inode table
4361 */
00d09882 4362 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
8016e29f 4363 inode_offset = ((ino - 1) %
240799cd 4364 EXT4_INODES_PER_GROUP(sb));
240799cd
TT
4365 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4366
eee22187
BL
4367 block = ext4_inode_table(sb, gdp);
4368 if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4369 (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4370 ext4_error(sb, "Invalid inode table block %llu in "
4371 "block_group %u", block, iloc->block_group);
4372 return -EFSCORRUPTED;
4373 }
4374 block += (inode_offset / inodes_per_block);
4375
240799cd 4376 bh = sb_getblk(sb, block);
aebf0243 4377 if (unlikely(!bh))
860d21e2 4378 return -ENOMEM;
8e33fadf
ZY
4379 if (ext4_buffer_uptodate(bh))
4380 goto has_buffer;
9c83a923 4381
8e33fadf 4382 lock_buffer(bh);
f2c77973
ZY
4383 if (ext4_buffer_uptodate(bh)) {
4384 /* Someone brought it uptodate while we waited */
4385 unlock_buffer(bh);
4386 goto has_buffer;
4387 }
4388
8e33fadf
ZY
4389 /*
4390 * If we have all information of the inode in memory and this
4391 * is the only valid inode in the block, we need not read the
4392 * block.
4393 */
de01f484 4394 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
8e33fadf
ZY
4395 struct buffer_head *bitmap_bh;
4396 int i, start;
ac27a0ec 4397
8e33fadf 4398 start = inode_offset & ~(inodes_per_block - 1);
ac27a0ec 4399
8e33fadf
ZY
4400 /* Is the inode bitmap in cache? */
4401 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4402 if (unlikely(!bitmap_bh))
4403 goto make_io;
ac27a0ec 4404
8e33fadf
ZY
4405 /*
4406 * If the inode bitmap isn't in cache then the
4407 * optimisation may end up performing two reads instead
4408 * of one, so skip it.
4409 */
4410 if (!buffer_uptodate(bitmap_bh)) {
ac27a0ec 4411 brelse(bitmap_bh);
8e33fadf 4412 goto make_io;
ac27a0ec 4413 }
8e33fadf
ZY
4414 for (i = start; i < start + inodes_per_block; i++) {
4415 if (i == inode_offset)
4416 continue;
4417 if (ext4_test_bit(i, bitmap_bh->b_data))
4418 break;
ac27a0ec 4419 }
8e33fadf
ZY
4420 brelse(bitmap_bh);
4421 if (i == start + inodes_per_block) {
de01f484
ZY
4422 struct ext4_inode *raw_inode =
4423 (struct ext4_inode *) (bh->b_data + iloc->offset);
4424
8e33fadf
ZY
4425 /* all other inodes are free, so skip I/O */
4426 memset(bh->b_data, 0, bh->b_size);
de01f484
ZY
4427 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4428 ext4_fill_raw_inode(inode, raw_inode);
8e33fadf
ZY
4429 set_buffer_uptodate(bh);
4430 unlock_buffer(bh);
4431 goto has_buffer;
4432 }
4433 }
ac27a0ec
DK
4434
4435make_io:
8e33fadf
ZY
4436 /*
4437 * If we need to do any I/O, try to pre-readahead extra
4438 * blocks from the inode table.
4439 */
4440 blk_start_plug(&plug);
4441 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4442 ext4_fsblk_t b, end, table;
4443 unsigned num;
4444 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4445
4446 table = ext4_inode_table(sb, gdp);
4447 /* s_inode_readahead_blks is always a power of 2 */
4448 b = block & ~((ext4_fsblk_t) ra_blks - 1);
4449 if (table > b)
4450 b = table;
4451 end = b + ra_blks;
4452 num = EXT4_INODES_PER_GROUP(sb);
4453 if (ext4_has_group_desc_csum(sb))
4454 num -= ext4_itable_unused_count(sb, gdp);
4455 table += num / inodes_per_block;
4456 if (end > table)
4457 end = table;
4458 while (b <= end)
4459 ext4_sb_breadahead_unmovable(sb, b++);
4460 }
240799cd 4461
8e33fadf
ZY
4462 /*
4463 * There are other valid inodes in the buffer, this inode
4464 * has in-inode xattrs, or we don't have this inode in memory.
4465 * Read the block from disk.
4466 */
4467 trace_ext4_load_inode(sb, ino);
4468 ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4469 blk_finish_plug(&plug);
4470 wait_on_buffer(bh);
4471 ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
4472 if (!buffer_uptodate(bh)) {
4473 if (ret_block)
4474 *ret_block = block;
4475 brelse(bh);
4476 return -EIO;
ac27a0ec
DK
4477 }
4478has_buffer:
4479 iloc->bh = bh;
4480 return 0;
4481}
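/*
 * Worked example for the arithmetic above (illustrative numbers): with
 * 4096-byte blocks, 256-byte inodes (16 per block) and 8192 inodes per
 * group, ino = 100 gives block_group = 0 and inode_offset = 99, so the
 * inode lives 99 / 16 = 6 blocks into that group's inode table, at
 * byte offset (99 % 16) * 256 = 768 within the block.
 */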
4482
8016e29f
HS
4483static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4484 struct ext4_iloc *iloc)
4485{
c27c29c6 4486 ext4_fsblk_t err_blk = 0;
8016e29f
HS
4487 int ret;
4488
de01f484 4489 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
8016e29f
HS
4490 &err_blk);
4491
4492 if (ret == -EIO)
4493 ext4_error_inode_block(inode, err_blk, EIO,
4494 "unable to read itable block");
4495
4496 return ret;
4497}
4498
617ba13b 4499int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
ac27a0ec 4500{
c27c29c6 4501 ext4_fsblk_t err_blk = 0;
8016e29f
HS
4502 int ret;
4503
de01f484
ZY
4504 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4505 &err_blk);
8016e29f
HS
4506
4507 if (ret == -EIO)
4508 ext4_error_inode_block(inode, err_blk, EIO,
4509 "unable to read itable block");
4510
4511 return ret;
4512}
4513
4514
4515int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4516 struct ext4_iloc *iloc)
4517{
de01f484 4518 return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
ac27a0ec
DK
4519}
4520
a8ab6d38 4521static bool ext4_should_enable_dax(struct inode *inode)
6642586b 4522{
a8ab6d38
IW
4523 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4524
9cb20f94 4525 if (test_opt2(inode->i_sb, DAX_NEVER))
6642586b
RZ
4526 return false;
4527 if (!S_ISREG(inode->i_mode))
4528 return false;
4529 if (ext4_should_journal_data(inode))
4530 return false;
4531 if (ext4_has_inline_data(inode))
4532 return false;
592ddec7 4533 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
6642586b 4534 return false;
c93d8f88
EB
4535 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4536 return false;
a8ab6d38
IW
4537 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4538 return false;
4539 if (test_opt(inode->i_sb, DAX_ALWAYS))
4540 return true;
4541
b383a73f 4542 return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
6642586b
RZ
4543}
4544
043546e4 4545void ext4_set_inode_flags(struct inode *inode, bool init)
ac27a0ec 4546{
617ba13b 4547 unsigned int flags = EXT4_I(inode)->i_flags;
00a1a053 4548 unsigned int new_fl = 0;
ac27a0ec 4549
043546e4
IW
4550 WARN_ON_ONCE(IS_DAX(inode) && init);
4551
617ba13b 4552 if (flags & EXT4_SYNC_FL)
00a1a053 4553 new_fl |= S_SYNC;
617ba13b 4554 if (flags & EXT4_APPEND_FL)
00a1a053 4555 new_fl |= S_APPEND;
617ba13b 4556 if (flags & EXT4_IMMUTABLE_FL)
00a1a053 4557 new_fl |= S_IMMUTABLE;
617ba13b 4558 if (flags & EXT4_NOATIME_FL)
00a1a053 4559 new_fl |= S_NOATIME;
617ba13b 4560 if (flags & EXT4_DIRSYNC_FL)
00a1a053 4561 new_fl |= S_DIRSYNC;
043546e4
IW
4562
4563 /* Because of the way inode_set_flags() works we must preserve S_DAX
4564 * here if already set. */
4565 new_fl |= (inode->i_flags & S_DAX);
4566 if (init && ext4_should_enable_dax(inode))
923ae0ff 4567 new_fl |= S_DAX;
043546e4 4568
2ee6a576
EB
4569 if (flags & EXT4_ENCRYPT_FL)
4570 new_fl |= S_ENCRYPTED;
b886ee3e
GKB
4571 if (flags & EXT4_CASEFOLD_FL)
4572 new_fl |= S_CASEFOLD;
c93d8f88
EB
4573 if (flags & EXT4_VERITY_FL)
4574 new_fl |= S_VERITY;
5f16f322 4575 inode_set_flags(inode, new_fl,
2ee6a576 4576 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
c93d8f88 4577 S_ENCRYPTED|S_CASEFOLD|S_VERITY);
ac27a0ec
DK
4578}
4579
0fc1b451 4580static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
de9a55b8 4581 struct ext4_inode_info *ei)
0fc1b451
AK
4582{
4583 blkcnt_t i_blocks ;
8180a562
AK
4584 struct inode *inode = &(ei->vfs_inode);
4585 struct super_block *sb = inode->i_sb;
0fc1b451 4586
e2b911c5 4587 if (ext4_has_feature_huge_file(sb)) {
0fc1b451
AK
4588 /* we are using combined 48 bit field */
4589 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4590 le32_to_cpu(raw_inode->i_blocks_lo);
07a03824 4591 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
8180a562
AK
4592 /* i_blocks represent file system block size */
4593 return i_blocks << (inode->i_blkbits - 9);
4594 } else {
4595 return i_blocks;
4596 }
0fc1b451
AK
4597 } else {
4598 return le32_to_cpu(raw_inode->i_blocks_lo);
4599 }
4600}
ff9ddf7e 4601
eb9b5f01 4602static inline int ext4_iget_extra_inode(struct inode *inode,
152a7b0a
TM
4603 struct ext4_inode *raw_inode,
4604 struct ext4_inode_info *ei)
4605{
4606 __le32 *magic = (void *)raw_inode +
4607 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
eb9b5f01 4608
fd7e672e 4609 if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
290ab230 4610 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
1dcdce59
YB
4611 int err;
4612
152a7b0a 4613 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
1dcdce59
YB
4614 err = ext4_find_inline_data_nolock(inode);
4615 if (!err && ext4_has_inline_data(inode))
4616 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4617 return err;
f19d5870
TM
4618 } else
4619 EXT4_I(inode)->i_inline_off = 0;
eb9b5f01 4620 return 0;
152a7b0a
TM
4621}
4622
040cb378
LX
4623int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4624{
0b7b7779 4625 if (!ext4_has_feature_project(inode->i_sb))
040cb378
LX
4626 return -EOPNOTSUPP;
4627 *projid = EXT4_I(inode)->i_projid;
4628 return 0;
4629}
4630
e254d1af
EG
4631/*
4632 * ext4 has self-managed i_version for ea inodes: it stores the lower 32 bits
4633 * of the refcount in i_version, so use raw values if the inode has the
4634 * EXT4_EA_INODE_FL flag set.
4635 */
4636static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4637{
4638 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4639 inode_set_iversion_raw(inode, val);
4640 else
4641 inode_set_iversion_queried(inode, val);
4642}
e254d1af 4643
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
			  ext4_iget_flags flags, const char *function,
			  unsigned int line)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	loff_t size;
	int block;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	if ((!(flags & EXT4_IGET_SPECIAL) &&
	     ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
	      ino == le32_to_cpu(es->s_usr_quota_inum) ||
	      ino == le32_to_cpu(es->s_grp_quota_inum) ||
	      ino == le32_to_cpu(es->s_prj_quota_inum) ||
	      ino == le32_to_cpu(es->s_orphan_file_inum))) ||
	    (ino < EXT4_ROOT_INO) ||
	    (ino > le32_to_cpu(es->s_inodes_count))) {
		if (flags & EXT4_IGET_HANDLE)
			return ERR_PTR(-ESTALE);
		__ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
			     "inode #%lu: comm %s: iget: illegal inode #",
			     ino, current->comm);
		return ERR_PTR(-EFSCORRUPTED);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);

	if ((flags & EXT4_IGET_HANDLE) &&
	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
			EXT4_INODE_SIZE(inode->i_sb) ||
		    (ei->i_extra_isize & 3)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: bad extra_isize %u "
					 "(inode size %u)",
					 ei->i_extra_isize,
					 EXT4_INODE_SIZE(inode->i_sb));
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
	} else
		ei->i_extra_isize = 0;

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = raw_inode->i_generation;
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
	    ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
	    (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
		ext4_error_inode_err(inode, function, line, 0,
				EFSBADCRC, "iget: checksum invalid");
		ret = -EFSBADCRC;
		goto bad_inode;
	}

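/*
 * Sketch of the seed chaining above (illustrative): with metadata_csum,
 * ext4_chksum() is crc32c-based, so the per-inode seed folds the inode
 * number and generation into the per-filesystem seed, roughly:
 *
 *	seed = crc32c(crc32c(sbi->s_csum_seed, &inum, 4), &gen, 4);
 *
 * A stale or misplaced copy of the inode therefore fails verification.
 */
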
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (ext4_has_feature_project(sb) &&
	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
	else
		i_projid = EXT4_DEF_PROJID;

	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_inline_off = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
		    ino != EXT4_BOOT_LOADER_INO) {
			/* this inode is deleted or unallocated */
			if (flags & EXT4_IGET_SPECIAL) {
				ext4_error_inode(inode, function, line, 0,
						 "iget: special inode unallocated");
				ret = -EFSCORRUPTED;
			} else
				ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those.
		 * OR it is the EXT4_BOOT_LOADER_INO which is
		 * not initialized on a new filesystem. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ext4_set_inode_flags(inode, true);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (ext4_has_feature_64bit(sb))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(sb, raw_inode);
	if ((size = i_size_read(inode)) < 0) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad i_size value: %lld", size);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	/*
	 * If dir_index is not enabled but there's dir with INDEX flag set,
	 * we'd normally treat htree data as empty space. But with metadata
	 * checksumming that corrupts checksums so forbid that.
	 */
	if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
		ext4_error_inode(inode, function, line, 0,
			 "iget: Dir with htree data on filesystem without dir_index feature.");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);
	ext4_fc_init_inode(&ei->vfs_inode);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to the currently running
	 * transaction as we cannot be sure that the inode or some of its
	 * metadata isn't part of the transaction - the inode could have
	 * been reclaimed and now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
			if (ret)
				goto bad_inode;
		}
	}

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);

		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				ivers |=
		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
		}
		ext4_inode_set_iversion_queried(inode, ivers);
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	} else if (!ext4_has_inline_data(inode)) {
		/* validate the block references in the inode */
		if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			(S_ISLNK(inode->i_mode) &&
			!ext4_inode_is_fast_symlink(inode)))) {
			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
				ret = ext4_ext_check_inode(inode);
			else
				ret = ext4_ind_check_inode(inode);
		}
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		/* VFS does not allow setting these so must be corruption */
		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: immutable or append flags "
					 "not allowed on symlinks");
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
		if (IS_ENCRYPTED(inode)) {
			inode->i_op = &ext4_encrypted_symlink_inode_operations;
		} else if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else if (ino == EXT4_BOOT_LOADER_INO) {
		make_bad_inode(inode);
	} else {
		ret = -EFSCORRUPTED;
		ext4_error_inode(inode, function, line, 0,
				 "iget: bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
		ext4_error_inode(inode, function, line, 0,
				 "casefold flag without casefold feature");
	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
		ext4_error_inode(inode, function, line, 0,
				 "bad inode without EXT4_IGET_BAD flag");
		ret = -EUCLEAN;
		goto bad_inode;
	}

	brelse(iloc.bh);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}
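
/*
 * Typical call site (illustrative): users go through the ext4_iget()
 * wrapper, which supplies __func__ and __LINE__ for the error reports
 * above, e.g.:
 *
 *	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */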

static void __ext4_update_other_inode_time(struct super_block *sb,
					   unsigned long orig_ino,
					   unsigned long ino,
					   struct ext4_inode *raw_inode)
{
	struct inode *inode;

	inode = find_inode_by_ino_rcu(sb, ino);
	if (!inode)
		return;

	if (!inode_is_dirtytime_only(inode))
		return;

	spin_lock(&inode->i_lock);
	if (inode_is_dirtytime_only(inode)) {
		struct ext4_inode_info *ei = EXT4_I(inode);

		inode->i_state &= ~I_DIRTY_TIME;
		spin_unlock(&inode->i_lock);

		spin_lock(&ei->i_raw_lock);
		EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
		ext4_inode_csum_set(inode, raw_inode, ei);
		spin_unlock(&ei->i_raw_lock);
		trace_ext4_other_inode_update_time(inode, orig_ino);
		return;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Opportunistically update the other time fields for other inodes in
 * the same inode table block.
 */
static void ext4_update_other_inodes_time(struct super_block *sb,
					  unsigned long orig_ino, char *buf)
{
	unsigned long ino;
	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int inode_size = EXT4_INODE_SIZE(sb);

	/*
	 * Calculate the first inode in the inode table block.  Inode
	 * numbers are one-based.  That is, the first inode in a block
	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
	 */
	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
	rcu_read_lock();
	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
		if (ino == orig_ino)
			continue;
		__ext4_update_other_inode_time(sb, orig_ino, ino,
					       (struct ext4_inode *)buf);
	}
	rcu_read_unlock();
}

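/*
 * Worked example (illustrative): with 16 inodes per block, a call for
 * orig_ino == 40 computes ((40 - 1) & ~15) + 1 == 33 and scans inodes
 * #33..#48, i.e. exactly the inode table block that is already being
 * written out.
 */
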
/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	struct super_block *sb = inode->i_sb;
	int err;
	int need_datasync = 0, set_large_file = 0;

	spin_lock(&ei->i_raw_lock);

	/*
	 * For fields not tracked in the in-memory inode, initialise them
	 * to zero for new inodes.
	 */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
		need_datasync = 1;
	if (ei->i_disksize > 0x7fffffffULL) {
		if (!ext4_has_feature_large_file(sb) ||
		    EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
			set_large_file = 1;
	}

	err = ext4_fill_raw_inode(inode, raw_inode);
	spin_unlock(&ei->i_raw_lock);
	if (err) {
		EXT4_ERROR_INODE(inode, "corrupted inode contents");
		goto out_brelse;
	}

	if (inode->i_sb->s_flags & SB_LAZYTIME)
		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
					      bh->b_data);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (err)
		goto out_error;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	if (set_large_file) {
		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
		err = ext4_journal_get_write_access(handle, sb,
						    EXT4_SB(sb)->s_sbh,
						    EXT4_JTR_NONE);
		if (err)
			goto out_error;
		lock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_set_feature_large_file(sb);
		ext4_superblock_csum_set(sb);
		unlock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_handle_sync(handle);
		err = ext4_handle_dirty_metadata(handle, NULL,
						 EXT4_SB(sb)->s_sbh);
	}
	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_error:
	ext4_std_error(inode->i_sb, err);
out_brelse:
	brelse(bh);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
 *   Here, there will be no transaction running.  We wait for any running
 *   transaction to commit.
 *
 * - Within flush work (sys_sync(), kupdate and such).
 *   We wait on commit, if told to.
 *
 * - Within iput_final() -> write_inode_now()
 *   We wait on commit, if told to.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
 * writeback.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because write_inode() could occur while `stuff()' is running,
 * and the new i_size will be lost.  Plus the inode will no longer be on the
 * superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
	    sb_rdonly(inode->i_sb))
		return 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			ext4_debug("called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		/*
		 * No need to force transaction in WB_SYNC_NONE mode. Also
		 * ext4_sync_fs() will force the commit after everything is
		 * written.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
				     EXT4_I(inode)->i_sync_tid);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc_noinmem(inode, &iloc);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidate_folio() may fail to
 * invalidate buffers that are attached to a folio straddling i_size and
 * are undergoing commit. In that case we have to wait for commit to
 * finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_SIZE - 1);
	/*
	 * If the folio is fully truncated, we don't need to wait for any commit
	 * (and we even should not as __ext4_journalled_invalidate_folio() may
	 * strip all buffers from the folio but keep the folio dirty which can then
	 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
	 * buffers). Also we don't need to wait for any commit if all buffers in
	 * the folio remain valid. This is most beneficial for the common case of
	 * blocksize == PAGESIZE.
	 */
	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
		return;
	while (1) {
		struct folio *folio = filemap_lock_folio(inode->i_mapping,
				      inode->i_size >> PAGE_SHIFT);
		if (IS_ERR(folio))
			return;
		ret = __ext4_journalled_invalidate_folio(folio, offset,
						folio_size(folio) - offset);
		folio_unlock(folio);
		folio_put(folio);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

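/*
 * Example of the early-return test above (illustrative): with 4KiB
 * pages and 1KiB blocks, i_size == 0x1200 gives offset == 0x200, which
 * is <= PAGE_SIZE - blocksize (0xC00), so the tail folio straddles
 * i_size mid-buffer and we must wait.  With blocksize == PAGE_SIZE,
 * any nonzero offset exceeds PAGE_SIZE - blocksize == 0 and no wait
 * is ever needed.
 */
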
/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to ensure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_rwsem down.
 */
int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;
	bool inc_ivers = true;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
		     (ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	error = fscrypt_prepare_setattr(dentry, attr);
	if (error)
		return error;

	error = fsverity_prepare_setattr(dentry, attr);
	if (error)
		return error;

	if (is_quota_modification(idmap, inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}

	if (i_uid_needs_update(idmap, attr, inode) ||
	    i_gid_needs_update(idmap, attr, inode)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		error = dquot_transfer(idmap, inode, attr);
		up_read(&EXT4_I(inode)->xattr_sem);

		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		i_uid_update(idmap, attr, inode);
		i_gid_update(idmap, attr, inode);
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
		if (unlikely(error)) {
			return error;
		}
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		loff_t old_disksize;
		int shrink = (attr->ia_size < inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				return -EFBIG;
			}
		}
		if (!S_ISREG(inode->i_mode)) {
			return -EINVAL;
		}

		if (attr->ia_size == inode->i_size)
			inc_ivers = false;

		if (shrink) {
			if (ext4_should_order_data(inode)) {
				error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
				if (error)
					goto err_out;
			}
			/*
			 * Blocks are going to be removed from the inode. Wait
			 * for dio in flight.
			 */
			inode_dio_wait(inode);
		}

		filemap_invalidate_lock(inode->i_mapping);

		rc = ext4_break_layouts(inode);
		if (rc) {
			filemap_invalidate_unlock(inode->i_mapping);
			goto err_out;
		}

		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto out_mmap_sem;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in shrink case below
			 */
			if (!shrink) {
				inode->i_mtime = current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}

			if (shrink)
				ext4_fc_track_range(handle, inode,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits,
					EXT_MAX_BLOCKS - 1);
			else
				ext4_fc_track_range(
					handle, inode,
					(oldsize > 0 ? oldsize - 1 : oldsize) >>
					inode->i_sb->s_blocksize_bits,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits);

			down_write(&EXT4_I(inode)->i_data_sem);
			old_disksize = EXT4_I(inode)->i_disksize;
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			else
				EXT4_I(inode)->i_disksize = old_disksize;
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error)
				goto out_mmap_sem;
			if (!shrink) {
				pagecache_isize_extended(inode, oldsize,
							 inode->i_size);
			} else if (ext4_should_journal_data(inode)) {
				ext4_wait_for_tail_page_commit(inode);
			}
		}

		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		/*
		 * Call ext4_truncate() even if i_size didn't change to
		 * truncate possible preallocated blocks.
		 */
		if (attr->ia_size <= oldsize) {
			rc = ext4_truncate(inode);
			if (rc)
				error = rc;
		}
out_mmap_sem:
		filemap_invalidate_unlock(inode->i_mapping);
	}

	if (!error) {
		if (inc_ivers)
			inode_inc_iversion(inode);
		setattr_copy(idmap, inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!error && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(idmap, dentry, inode->i_mode);

err_out:
	if (error)
		ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

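/*
 * Ordering summary for the shrinking-truncate path above (illustrative
 * recap, not extra locking rules):
 *	1. inode_dio_wait()            - no direct I/O past the new EOF
 *	2. filemap_invalidate_lock()   - fence off page faults
 *	3. orphan add + i_disksize     - crash-safe intent to truncate
 *	4. truncate_pagecache()        - after any data=journal commit wait
 *	5. ext4_truncate()             - actually free the blocks
 */
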
u32 ext4_dio_alignment(struct inode *inode)
{
	if (fsverity_active(inode))
		return 0;
	if (ext4_should_journal_data(inode))
		return 0;
	if (ext4_has_inline_data(inode))
		return 0;
	if (IS_ENCRYPTED(inode)) {
		if (!fscrypt_dio_supported(inode))
			return 0;
		return i_blocksize(inode);
	}
	return 1; /* use the iomap defaults */
}
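
/*
 * Userspace view (illustrative, not ext4 code): a statx(2) caller that
 * asks for STATX_DIOALIGN sees values derived from this helper:
 *
 *	struct statx stx;
 *	statx(fd, "", AT_EMPTY_PATH, STATX_DIOALIGN, &stx);
 *	if (stx.stx_dio_offset_align == 0)
 *		;	// direct I/O is not supported on this file
 */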

int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	if ((request_mask & STATX_BTIME) &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = ei->i_crtime.tv_sec;
		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
	}

	/*
	 * Return the DIO alignment restrictions if requested.  We only return
	 * this information when requested, since on encrypted files it might
	 * take a fair bit of work to get if the file wasn't opened recently.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		u32 dio_align = ext4_dio_alignment(inode);

		stat->result_mask |= STATX_DIOALIGN;
		if (dio_align == 1) {
			struct block_device *bdev = inode->i_sb->s_bdev;

			/* iomap defaults */
			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
			stat->dio_offset_align = bdev_logical_block_size(bdev);
		} else {
			stat->dio_mem_align = dio_align;
			stat->dio_offset_align = dio_align;
		}
	}

	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
	if (flags & EXT4_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT4_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT4_ENCRYPT_FL)
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & EXT4_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT4_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (flags & EXT4_VERITY_FL)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(idmap, inode, stat);
	return 0;
}

int ext4_file_getattr(struct mnt_idmap *idmap,
		      const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	u64 delalloc_blocks;

	ext4_getattr(idmap, path, stat, request_mask, query_flags);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync
	 * and others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks if the block allocation is delayed,
	 * otherwise in the case of a system crash before the real block
	 * allocation is done, we will have i_blocks inconsistent with
	 * on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation.  But to avoid confusing userspace, stat will return
	 * the blocks that include the delayed allocation blocks for this
	 * file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify both data blocks and index blocks.
 * In the worst case, the index blocks spread over different block
 * groups.
 *
 * If the data blocks are discontiguous, they can spread over different
 * block groups too.  If they are contiguous, with flexbg, they could
 * still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret;

	/*
	 * How many index blocks do we need to touch to map @lblocks logical
	 * blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to be accounted for.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
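
/*
 * Worked example (illustrative): mapping four logical blocks as one
 * extent (lblocks == 4, pextents == 1) on an extent-mapped inode costs
 * idxblocks for the extent tree path, plus up to (idxblocks + 1) group
 * bitmap and descriptor blocks, plus EXT4_META_TRANS_BLOCKS() for the
 * superblock, inode, quota and xattr blocks.
 */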

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when
 * one new block is allocated per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		put_bh(iloc->bh);
		return -EIO;
	}
	ext4_fc_track_inode(handle, inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    iloc->bh, EXT4_JTR_NONE);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

static int __ext4_expand_extra_isize(struct inode *inode,
				     unsigned int new_extra_isize,
				     struct ext4_iloc *iloc,
				     handle_t *handle, int *no_expand)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int error;

	/* this was checked at iget time, but double check for good measure */
	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
	    (ei->i_extra_isize & 3)) {
		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
				 ei->i_extra_isize,
				 EXT4_INODE_SIZE(inode->i_sb));
		return -EFSCORRUPTED;
	}
	if ((new_extra_isize < ei->i_extra_isize) ||
	    (new_extra_isize < 4) ||
	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
		return -EINVAL;	/* Should never happen */

	raw_inode = ext4_raw_inode(iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
		       EXT4_I(inode)->i_extra_isize, 0,
		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/*
	 * We may need to allocate an external xattr block, so we need quotas
	 * initialized.  Here we can be called with various locks held, so we
	 * cannot afford to initialize quotas ourselves.  So just bail.
	 */
	if (dquot_initialize_needed(inode))
		return -EAGAIN;

	/* try to expand with EAs present */
	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
					   raw_inode, handle);
	if (error) {
		/*
		 * Inode size expansion failed; don't try again
		 */
		*no_expand = 1;
	}

	return error;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_try_to_expand_extra_isize(struct inode *inode,
					  unsigned int new_extra_isize,
					  struct ext4_iloc iloc,
					  handle_t *handle)
{
	int no_expand;
	int error;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
		return -EOVERFLOW;

	/*
	 * In nojournal mode, we can immediately attempt to expand
	 * the inode.  When journaled, we first need to obtain extra
	 * buffer credits since we may write into the EA block
	 * with this same handle.  If journal_extend fails, then it will
	 * only result in a minor loss of functionality for that inode.
	 * If this is felt to be critical, then e2fsck should be run to
	 * force a large enough s_min_extra_isize.
	 */
	if (ext4_journal_extend(handle,
				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
		return -ENOSPC;

	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return -EBUSY;

	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
					  handle, &no_expand);
	ext4_write_unlock_xattr(inode, &no_expand);

	return error;
}

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc)
{
	handle_t *handle;
	int no_expand;
	int error, rc;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		brelse(iloc->bh);
		return -EOVERFLOW;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		brelse(iloc->bh);
		return error;
	}

	ext4_write_lock_xattr(inode, &no_expand);

	BUFFER_TRACE(iloc->bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
					      EXT4_JTR_NONE);
	if (error) {
		brelse(iloc->bh);
		goto out_unlock;
	}

	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
					  handle, &no_expand);

	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
	if (!error)
		error = rc;

out_unlock:
	ext4_write_unlock_xattr(inode, &no_expand);
	ext4_journal_stop(handle);
	return error;
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
			    const char *func, unsigned int line)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out;

	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
					       iloc, handle);

	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out:
	if (unlikely(err))
		ext4_error_inode_err(inode, func, line, 0, err,
					"mark_inode_dirty error");
	return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		return;
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	int alloc_ctx;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching inode's aops, we have
	 * to flush all dirty data the inode has.  There can be outstanding
	 * delayed allocations, there can be unwritten extents created by
	 * fallocate or buffered writes in dioread_nolock mode covered by
	 * dirty data which can be converted only after flushing the dirty
	 * data (and journalled aops don't know how to handle these cases).
	 */
	if (val) {
		filemap_invalidate_lock(inode->i_mapping);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			filemap_invalidate_unlock(inode->i_mapping);
			return err;
		}
	}

	alloc_ctx = ext4_writepages_down_write(inode->i_sb);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal, 0);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			ext4_writepages_up_write(inode->i_sb, alloc_ctx);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	ext4_writepages_up_write(inode->i_sb, alloc_ctx);

	if (val)
		filemap_invalidate_unlock(inode->i_mapping);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_fc_mark_ineligible(inode->i_sb,
		EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
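
/*
 * Illustrative trigger (not ext4 code): this path runs when userspace
 * toggles per-file data journalling, e.g. "chattr +j file", which sets
 * FS_JOURNAL_DATA_FL through the file attribute ioctls and eventually
 * lands here with val == 1.
 */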

static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
			    struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(vmf->page);
	loff_t size;
	unsigned long len;
	int err;
	vm_fault_t ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	filemap_invalidate_lock_shared(mapping);

	err = ext4_convert_inline_data(inode);
	if (err)
		goto out_ret;

	/*
	 * On data journalling we skip straight to the transaction handle:
	 * there's no delalloc; page truncation will be checked later; the
	 * early return w/ all buffers mapped (calculates size/len) can't
	 * be used; and there's no dioread_nolock, so only ext4_get_block.
	 */
	if (ext4_should_journal_data(inode))
		goto retry_alloc;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			err = block_page_mkwrite(vma, vmf,
						   ext4_da_get_block_prep);
		} while (err == -ENOSPC &&
		       ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	folio_lock(folio);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (folio->mapping != mapping || folio_pos(folio) > size) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = folio_size(folio);
	if (folio_pos(folio) + len > size)
		len = size - folio_pos(folio);
	/*
	 * Return if we have all the buffers mapped.  This avoids the need to
	 * do journal_start/journal_stop, which can block and take a long time.
	 *
	 * This cannot be done for data journalling, as we have to add the
	 * inode to the transaction's list to writeprotect pages on commit.
	 */
	if (folio_buffers(folio)) {
		if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			folio_wait_stable(folio);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	folio_unlock(folio);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	/*
	 * Data journalling can't use block_page_mkwrite() because it
	 * will set_buffer_dirty() before do_journal_get_write_access()
	 * thus might hit warning messages for dirty metadata buffers.
	 */
	if (!ext4_should_journal_data(inode)) {
		err = block_page_mkwrite(vma, vmf, get_block);
	} else {
		folio_lock(folio);
		size = i_size_read(inode);
		/* Page got truncated from under us? */
		if (folio->mapping != mapping || folio_pos(folio) > size) {
			ret = VM_FAULT_NOPAGE;
			goto out_error;
		}

		len = folio_size(folio);
		if (folio_pos(folio) + len > size)
			len = size - folio_pos(folio);

		err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
		if (!err) {
			ret = VM_FAULT_SIGBUS;
			if (ext4_journal_page_buffers(handle, &folio->page, len))
				goto out_error;
		} else {
			folio_unlock(folio);
		}
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(err);
out:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
out_error:
	folio_unlock(folio);
	ext4_journal_stop(handle);
	goto out;
}
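
/*
 * Design note on the retry loop above (illustrative summary): an
 * -ENOSPC from block allocation does not immediately SIGBUS the
 * faulting task; ext4_should_retry_alloc() allows a bounded number of
 * retries so that a racing transaction commit can free reserved
 * blocks first.
 */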