// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}
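
/*
 * Note: the crc32c computed above covers the node from the start of the
 * extent header through the last slot that can hold an entry
 * (EXT4_EXTENT_TAIL_OFFSET), and is stored in the struct ext4_extent_tail
 * at the very end of the block.  The root node lives in the inode body
 * and is covered by the inode checksum instead, which is why these
 * helpers only apply to on-disk (non-root) extent tree blocks.
 */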

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
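
/*
 * A minimal caller sketch (hypothetical names), following the contract
 * documented above: the caller holds i_data_sem for writing and must
 * treat a return of 1 as "transaction restarted", after which any cached
 * extent path may be stale:
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, needed,
 *					  restart_cred, revoke_cred);
 *	if (err < 0)
 *		return err;	(fatal error)
 *	if (err > 0)
 *		goto again;	(restarted: revalidate the extent path)
 */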

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty().  We could leave an inconsistent
		 * buffer if the extent updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object's sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
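
/*
 * Worked example, assuming the common 4KiB block size: struct
 * ext4_extent_header, struct ext4_extent and struct ext4_extent_idx are
 * 12 bytes each, so an on-disk tree block holds (4096 - 12) / 12 = 340
 * entries (the 4 leftover bytes hold the checksum tail), while the
 * 60-byte i_data area of the inode leaves (60 - 12) / 12 = 4 entries
 * for the root node.
 */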

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t prev = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if ((lblock <= prev) && prev) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			prev = lblock + ext4_ext_get_actual_len(ext) - 1;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if ((lblock <= prev) && prev) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			prev = lblock;
		}
	}
	return 1;
}
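
/*
 * In short, a node is accepted only if its entries are sorted by logical
 * block and non-overlapping (e.g. a leaf covering blocks 0..99 followed
 * by an extent starting at 50 is rejected), every physical block passes
 * ext4_inode_block_valid(), and, for a non-root node, the first key
 * matches the key recorded in the parent index entry.
 */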

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}
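
/*
 * For example, a leaf mapping logical blocks 0-9 and 20-29 also gets
 * blocks 10-19 cached as a hole (EXTENT_STATUS_HOLE), so later lookups
 * in that range are answered from the extent status tree without
 * re-reading the leaf.
 */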

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head		*bh;
	int				err;
	gfp_t				gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t			pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);

}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}
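
/*
 * The loop above is an iterative depth-first traversal: i is the current
 * level, p_idx is the cursor within that level's node, and hitting a
 * leaf (i == depth) or the end of an index block pops back up one level.
 */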

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, "  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, "  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, "  []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_unwritten(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug(inode, "binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
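
/*
 * Example: with index keys {0, 100, 200}, a search for block 150 leaves
 * l pointing at the 200 entry, so p_idx = l - 1 selects the subtree at
 * key 100, i.e. the rightmost index whose key is <= the target block.
 */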

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_ext = l - 1;
	ext_debug(inode, "  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_unwritten(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
				gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}
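
/*
 * On success the returned array describes one root-to-leaf walk:
 * path[0] is the root node in the inode body, path[k].p_idx is the
 * index followed at level k, and path[depth].p_ext is the extent
 * closest to @block (NULL for an empty leaf).  The caller owns the
 * array and releases it with ext4_ext_drop_refs() plus kfree().
 */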

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	neh->eh_generation = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug(inode, "create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		neh->eh_generation = 0;
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
		       inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	set_buffer_verified(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	err = ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
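
/*
 * After growing, the tree is one level deeper: the old root's entries
 * now live in the newly allocated block and the root in the inode body
 * carries a single index entry pointing at it, so a subsequent split
 * always finds a free slot at the top level.
 */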
1384 | ||
1385 | /* | |
d0d856e8 RD |
1386 | * ext4_ext_create_new_leaf: |
1387 | * finds empty index and adds new leaf. | |
1388 | * if no free index is found, then it requests in-depth growing. | |
a86c6181 AT |
1389 | */ |
1390 | static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, | |
107a7bd3 TT |
1391 | unsigned int mb_flags, |
1392 | unsigned int gb_flags, | |
dfe50809 | 1393 | struct ext4_ext_path **ppath, |
55f020db | 1394 | struct ext4_extent *newext) |
a86c6181 | 1395 | { |
dfe50809 | 1396 | struct ext4_ext_path *path = *ppath; |
a86c6181 AT |
1397 | struct ext4_ext_path *curp; |
1398 | int depth, i, err = 0; | |
1399 | ||
1400 | repeat: | |
1401 | i = depth = ext_depth(inode); | |
1402 | ||
1403 | /* walk up to the tree and look for free index entry */ | |
1404 | curp = path + depth; | |
1405 | while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { | |
1406 | i--; | |
1407 | curp--; | |
1408 | } | |
1409 | ||
d0d856e8 RD |
1410 | /* we use already allocated block for index block, |
1411 | * so subsequent data blocks should be contiguous */ | |
a86c6181 AT |
1412 | if (EXT_HAS_FREE_INDEX(curp)) { |
1413 | /* if we found index with free entry, then use that | |
1414 | * entry: create all needed subtree and add new leaf */ | |
107a7bd3 | 1415 | err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); |
787e0981 SF |
1416 | if (err) |
1417 | goto out; | |
a86c6181 AT |
1418 | |
1419 | /* refill path */ | |
ed8a1a76 | 1420 | path = ext4_find_extent(inode, |
725d26d3 | 1421 | (ext4_lblk_t)le32_to_cpu(newext->ee_block), |
dfe50809 | 1422 | ppath, gb_flags); |
a86c6181 AT |
1423 | if (IS_ERR(path)) |
1424 | err = PTR_ERR(path); | |
1425 | } else { | |
1426 | /* tree is full, time to grow in depth */ | |
be5cd90d | 1427 | err = ext4_ext_grow_indepth(handle, inode, mb_flags); |
a86c6181 AT |
1428 | if (err) |
1429 | goto out; | |
1430 | ||
1431 | /* refill path */ | |
ed8a1a76 | 1432 | path = ext4_find_extent(inode, |
725d26d3 | 1433 | (ext4_lblk_t)le32_to_cpu(newext->ee_block), |
dfe50809 | 1434 | ppath, gb_flags); |
a86c6181 AT |
1435 | if (IS_ERR(path)) { |
1436 | err = PTR_ERR(path); | |
1437 | goto out; | |
1438 | } | |
1439 | ||
1440 | /* | |
d0d856e8 RD |
1441 | * only first (depth 0 -> 1) produces free space; |
1442 | * in all other cases we have to split the grown tree | |
a86c6181 AT |
1443 | */ |
1444 | depth = ext_depth(inode); | |
1445 | if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { | |
d0d856e8 | 1446 | /* now we need to split */ |
a86c6181 AT |
1447 | goto repeat; |
1448 | } | |
1449 | } | |
1450 | ||
1451 | out: | |
1452 | return err; | |
1453 | } | |
1454 | ||
1988b51e AT |
1455 | /* |
1456 | * search the closest allocated block to the left for *logical | |
1457 | * and returns it at @logical + it's physical address at @phys | |
1458 | * if *logical is the smallest allocated block, the function | |
1459 | * returns 0 at @phys | |
1460 | * return value contains 0 (success) or error code | |
1461 | */ | |
1f109d5a TT |
1462 | static int ext4_ext_search_left(struct inode *inode, |
1463 | struct ext4_ext_path *path, | |
1464 | ext4_lblk_t *logical, ext4_fsblk_t *phys) | |
1988b51e AT |
1465 | { |
1466 | struct ext4_extent_idx *ix; | |
1467 | struct ext4_extent *ex; | |
b939e376 | 1468 | int depth, ee_len; |
1988b51e | 1469 | |
273df556 FM |
1470 | if (unlikely(path == NULL)) { |
1471 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | |
6a797d27 | 1472 | return -EFSCORRUPTED; |
273df556 | 1473 | } |
1988b51e AT |
1474 | depth = path->p_depth; |
1475 | *phys = 0; | |
1476 | ||
1477 | if (depth == 0 && path->p_ext == NULL) | |
1478 | return 0; | |
1479 | ||
1480 | /* usually the extent in the path covers blocks smaller
1481 | * than *logical, but it can be that the extent is the
1482 | * first one in the file */
1483 | ||
1484 | ex = path[depth].p_ext; | |
b939e376 | 1485 | ee_len = ext4_ext_get_actual_len(ex); |
1988b51e | 1486 | if (*logical < le32_to_cpu(ex->ee_block)) { |
273df556 FM |
1487 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
1488 | EXT4_ERROR_INODE(inode, | |
1489 | "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", | |
1490 | *logical, le32_to_cpu(ex->ee_block)); | |
6a797d27 | 1491 | return -EFSCORRUPTED; |
273df556 | 1492 | } |
1988b51e AT |
1493 | while (--depth >= 0) { |
1494 | ix = path[depth].p_idx; | |
273df556 FM |
1495 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
1496 | EXT4_ERROR_INODE(inode, | |
1497 | "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", | |
6ee3b212 | 1498 | ix != NULL ? le32_to_cpu(ix->ei_block) : 0, |
037e7c52 | 1499 | le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block), |
273df556 | 1500 | depth); |
6a797d27 | 1501 | return -EFSCORRUPTED; |
273df556 | 1502 | } |
1988b51e AT |
1503 | } |
1504 | return 0; | |
1505 | } | |
1506 | ||
273df556 FM |
1507 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
1508 | EXT4_ERROR_INODE(inode, | |
1509 | "logical %d < ee_block %d + ee_len %d!", | |
1510 | *logical, le32_to_cpu(ex->ee_block), ee_len); | |
6a797d27 | 1511 | return -EFSCORRUPTED; |
273df556 | 1512 | } |
1988b51e | 1513 | |
b939e376 | 1514 | *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; |
bf89d16f | 1515 | *phys = ext4_ext_pblock(ex) + ee_len - 1; |
1988b51e AT |
1516 | return 0; |
1517 | } | |
1518 | ||
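/*
 * The result computed above is plain arithmetic: the closest allocated
 * block on the left is the last block of the extent the path points to.
 * For example, an extent with ee_block = 100 and ee_len = 8 mapped at
 * physical block 5000 yields *logical = 100 + 8 - 1 = 107 and
 * *phys = 5000 + 8 - 1 = 5007.
 */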
1519 | /* | |
d7dce9e0 | 1520 | * Search the closest allocated block to the right of *logical
1521 | * and return it at @logical, with its physical address at @phys.
1522 | * If none exists, return 0 with @phys set to 0. Return 1 when an
1523 | * allocated block is found, in which case ret_ex is valid.
1524 | * Otherwise return a (< 0) error code.
1988b51e | 1525 | */ |
1f109d5a TT |
1526 | static int ext4_ext_search_right(struct inode *inode, |
1527 | struct ext4_ext_path *path, | |
4d33b1ef | 1528 | ext4_lblk_t *logical, ext4_fsblk_t *phys, |
d7dce9e0 | 1529 | struct ext4_extent *ret_ex) |
1988b51e AT |
1530 | { |
1531 | struct buffer_head *bh = NULL; | |
1532 | struct ext4_extent_header *eh; | |
1533 | struct ext4_extent_idx *ix; | |
1534 | struct ext4_extent *ex; | |
395a87bf ES |
1535 | int depth; /* Note, NOT eh_depth; depth from top of tree */ |
1536 | int ee_len; | |
1988b51e | 1537 | |
273df556 FM |
1538 | if (unlikely(path == NULL)) { |
1539 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | |
6a797d27 | 1540 | return -EFSCORRUPTED; |
273df556 | 1541 | } |
1988b51e AT |
1542 | depth = path->p_depth; |
1543 | *phys = 0; | |
1544 | ||
1545 | if (depth == 0 && path->p_ext == NULL) | |
1546 | return 0; | |
1547 | ||
1548 | /* usually the extent in the path covers blocks smaller
1549 | * than *logical, but it can be that the extent is the
1550 | * first one in the file */
1551 | ||
1552 | ex = path[depth].p_ext; | |
b939e376 | 1553 | ee_len = ext4_ext_get_actual_len(ex); |
1988b51e | 1554 | if (*logical < le32_to_cpu(ex->ee_block)) { |
273df556 FM |
1555 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
1556 | EXT4_ERROR_INODE(inode, | |
1557 | "first_extent(path[%d].p_hdr) != ex", | |
1558 | depth); | |
6a797d27 | 1559 | return -EFSCORRUPTED; |
273df556 | 1560 | } |
1988b51e AT |
1561 | while (--depth >= 0) { |
1562 | ix = path[depth].p_idx; | |
273df556 FM |
1563 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
1564 | EXT4_ERROR_INODE(inode, | |
1565 | "ix != EXT_FIRST_INDEX *logical %d!", | |
1566 | *logical); | |
6a797d27 | 1567 | return -EFSCORRUPTED; |
273df556 | 1568 | } |
1988b51e | 1569 | } |
4d33b1ef | 1570 | goto found_extent; |
1988b51e AT |
1571 | } |
1572 | ||
273df556 FM |
1573 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
1574 | EXT4_ERROR_INODE(inode, | |
1575 | "logical %d < ee_block %d + ee_len %d!", | |
1576 | *logical, le32_to_cpu(ex->ee_block), ee_len); | |
6a797d27 | 1577 | return -EFSCORRUPTED; |
273df556 | 1578 | } |
1988b51e AT |
1579 | |
1580 | if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { | |
1581 | /* next allocated block in this leaf */ | |
1582 | ex++; | |
4d33b1ef | 1583 | goto found_extent; |
1988b51e AT |
1584 | } |
1585 | ||
1586 | /* go up and search for index to the right */ | |
1587 | while (--depth >= 0) { | |
1588 | ix = path[depth].p_idx; | |
1589 | if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) | |
25f1ee3a | 1590 | goto got_index; |
1988b51e AT |
1591 | } |
1592 | ||
25f1ee3a WF |
1593 | /* we've gone up to the root and found no index to the right */ |
1594 | return 0; | |
1988b51e | 1595 | |
25f1ee3a | 1596 | got_index: |
1988b51e AT |
1597 | /* we've found index to the right, let's |
1598 | * follow it and find the closest allocated | |
1599 | * block to the right */ | |
1600 | ix++; | |
1988b51e | 1601 | while (++depth < path->p_depth) { |
395a87bf | 1602 | /* subtract from p_depth to get proper eh_depth */ |
9c6e0719 | 1603 | bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); |
7d7ea89e TT |
1604 | if (IS_ERR(bh)) |
1605 | return PTR_ERR(bh); | |
1606 | eh = ext_block_hdr(bh); | |
1988b51e | 1607 | ix = EXT_FIRST_INDEX(eh); |
1988b51e AT |
1608 | put_bh(bh); |
1609 | } | |
1610 | ||
9c6e0719 | 1611 | bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); |
7d7ea89e TT |
1612 | if (IS_ERR(bh)) |
1613 | return PTR_ERR(bh); | |
1988b51e | 1614 | eh = ext_block_hdr(bh); |
1988b51e | 1615 | ex = EXT_FIRST_EXTENT(eh); |
4d33b1ef | 1616 | found_extent: |
1988b51e | 1617 | *logical = le32_to_cpu(ex->ee_block); |
bf89d16f | 1618 | *phys = ext4_ext_pblock(ex); |
d7dce9e0 | 1619 | if (ret_ex) |
1620 | *ret_ex = *ex; | |
4d33b1ef TT |
1621 | if (bh) |
1622 | put_bh(bh); | |
d7dce9e0 | 1623 | return 1; |
1988b51e AT |
1624 | } |
1625 | ||
a86c6181 | 1626 | /* |
d0d856e8 | 1627 | * ext4_ext_next_allocated_block: |
f17722f9 | 1628 | * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. |
d0d856e8 RD |
1629 | * NOTE: it considers the block number from an index entry as an
1630 | * allocated block. Thus, index entries have to be consistent
1631 | * with leaves. | |
a86c6181 | 1632 | */ |
fcf6b1b7 | 1633 | ext4_lblk_t |
a86c6181 AT |
1634 | ext4_ext_next_allocated_block(struct ext4_ext_path *path) |
1635 | { | |
1636 | int depth; | |
1637 | ||
1638 | BUG_ON(path == NULL); | |
1639 | depth = path->p_depth; | |
1640 | ||
1641 | if (depth == 0 && path->p_ext == NULL) | |
f17722f9 | 1642 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1643 | |
1644 | while (depth >= 0) { | |
6e89bbb7 EB |
1645 | struct ext4_ext_path *p = &path[depth]; |
1646 | ||
a86c6181 AT |
1647 | if (depth == path->p_depth) { |
1648 | /* leaf */ | |
6e89bbb7 EB |
1649 | if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr)) |
1650 | return le32_to_cpu(p->p_ext[1].ee_block); | |
a86c6181 AT |
1651 | } else { |
1652 | /* index */ | |
6e89bbb7 EB |
1653 | if (p->p_idx != EXT_LAST_INDEX(p->p_hdr)) |
1654 | return le32_to_cpu(p->p_idx[1].ei_block); | |
a86c6181 AT |
1655 | } |
1656 | depth--; | |
1657 | } | |
1658 | ||
f17722f9 | 1659 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1660 | } |
1661 | ||
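/*
 * Illustrative user-space sketch (simplified names, not the ext4 API)
 * of the scan above: at each level, if an entry exists to the right of
 * the current position, its starting block is the next allocated
 * block; otherwise move one level toward the root.
 */
struct demo_node { int pos; int last; const unsigned int *starts; };

static unsigned int demo_next_allocated(const struct demo_node *path,
					int depth, unsigned int max_blocks)
{
	int d;

	for (d = depth; d >= 0; d--)		/* leaf toward root */
		if (path[d].pos < path[d].last)
			return path[d].starts[path[d].pos + 1];
	return max_blocks;	/* nothing to the right at any level */
}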
1662 | /* | |
d0d856e8 | 1663 | * ext4_ext_next_leaf_block: |
f17722f9 | 1664 | * returns first allocated block from next leaf or EXT_MAX_BLOCKS |
a86c6181 | 1665 | */ |
5718789d | 1666 | static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) |
a86c6181 AT |
1667 | { |
1668 | int depth; | |
1669 | ||
1670 | BUG_ON(path == NULL); | |
1671 | depth = path->p_depth; | |
1672 | ||
1673 | /* zero-tree has no leaf blocks at all */ | |
1674 | if (depth == 0) | |
f17722f9 | 1675 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1676 | |
1677 | /* go to index block */ | |
1678 | depth--; | |
1679 | ||
1680 | while (depth >= 0) { | |
1681 | if (path[depth].p_idx != | |
1682 | EXT_LAST_INDEX(path[depth].p_hdr)) | |
725d26d3 AK |
1683 | return (ext4_lblk_t) |
1684 | le32_to_cpu(path[depth].p_idx[1].ei_block); | |
a86c6181 AT |
1685 | depth--; |
1686 | } | |
1687 | ||
f17722f9 | 1688 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1689 | } |
1690 | ||
1691 | /* | |
d0d856e8 RD |
1692 | * ext4_ext_correct_indexes: |
1693 | * if a leaf gets modified and the modified extent is first in the leaf,
1694 | * then we have to correct all indexes above. | |
a86c6181 AT |
1695 | * TODO: do we need to correct tree in all cases? |
1696 | */ | |
1d03ec98 | 1697 | static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, |
a86c6181 AT |
1698 | struct ext4_ext_path *path) |
1699 | { | |
1700 | struct ext4_extent_header *eh; | |
1701 | int depth = ext_depth(inode); | |
1702 | struct ext4_extent *ex; | |
1703 | __le32 border; | |
1704 | int k, err = 0; | |
1705 | ||
1706 | eh = path[depth].p_hdr; | |
1707 | ex = path[depth].p_ext; | |
273df556 FM |
1708 | |
1709 | if (unlikely(ex == NULL || eh == NULL)) { | |
1710 | EXT4_ERROR_INODE(inode, | |
1711 | "ex %p == NULL or eh %p == NULL", ex, eh); | |
6a797d27 | 1712 | return -EFSCORRUPTED; |
273df556 | 1713 | } |
a86c6181 AT |
1714 | |
1715 | if (depth == 0) { | |
1716 | /* there is no tree at all */ | |
1717 | return 0; | |
1718 | } | |
1719 | ||
1720 | if (ex != EXT_FIRST_EXTENT(eh)) { | |
1721 | /* we correct the tree only if the first leaf got modified */
1722 | return 0; | |
1723 | } | |
1724 | ||
1725 | /* | |
d0d856e8 | 1726 | * TODO: we need correction if border is smaller than current one |
a86c6181 AT |
1727 | */ |
1728 | k = depth - 1; | |
1729 | border = path[depth].p_ext->ee_block; | |
7e028976 AM |
1730 | err = ext4_ext_get_access(handle, inode, path + k); |
1731 | if (err) | |
a86c6181 AT |
1732 | return err; |
1733 | path[k].p_idx->ei_block = border; | |
7e028976 AM |
1734 | err = ext4_ext_dirty(handle, inode, path + k); |
1735 | if (err) | |
a86c6181 AT |
1736 | return err; |
1737 | ||
1738 | while (k--) { | |
1739 | /* change all left-side indexes */ | |
1740 | if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) | |
1741 | break; | |
7e028976 AM |
1742 | err = ext4_ext_get_access(handle, inode, path + k); |
1743 | if (err) | |
a86c6181 AT |
1744 | break; |
1745 | path[k].p_idx->ei_block = border; | |
7e028976 AM |
1746 | err = ext4_ext_dirty(handle, inode, path + k); |
1747 | if (err) | |
a86c6181 AT |
1748 | break; |
1749 | } | |
1750 | ||
1751 | return err; | |
1752 | } | |
1753 | ||
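/*
 * Sketch (user-space, assumed demo_* names) of the propagation above:
 * the leaf's parent always takes the new border; each higher level is
 * only updated while the path passes through the first slot of its
 * child node.  Assumes depth >= 1, as the caller returns early for a
 * depth-0 tree.
 */
static void demo_correct_indexes(unsigned int *index_block,
				 const int *is_first_slot,
				 int depth, unsigned int border)
{
	int k;

	index_block[depth - 1] = border;
	for (k = depth - 2; k >= 0; k--) {
		if (!is_first_slot[k + 1])
			break;		/* child not in first slot: done */
		index_block[k] = border;
	}
}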
43f81677 EB |
1754 | static int ext4_can_extents_be_merged(struct inode *inode, |
1755 | struct ext4_extent *ex1, | |
1756 | struct ext4_extent *ex2) | |
a86c6181 | 1757 | { |
da0169b3 | 1758 | unsigned short ext1_ee_len, ext2_ee_len; |
a2df2a63 | 1759 | |
556615dc | 1760 | if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) |
a2df2a63 AA |
1761 | return 0; |
1762 | ||
1763 | ext1_ee_len = ext4_ext_get_actual_len(ex1); | |
1764 | ext2_ee_len = ext4_ext_get_actual_len(ex2); | |
1765 | ||
1766 | if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != | |
63f57933 | 1767 | le32_to_cpu(ex2->ee_block)) |
a86c6181 AT |
1768 | return 0; |
1769 | ||
da0169b3 | 1770 | if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) |
471d4011 | 1771 | return 0; |
378f32ba | 1772 | |
556615dc | 1773 | if (ext4_ext_is_unwritten(ex1) && |
378f32ba | 1774 | ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) |
a9b82415 | 1775 | return 0; |
bbf2f9fb | 1776 | #ifdef AGGRESSIVE_TEST |
b939e376 | 1777 | if (ext1_ee_len >= 4) |
a86c6181 AT |
1778 | return 0; |
1779 | #endif | |
1780 | ||
bf89d16f | 1781 | if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) |
a86c6181 AT |
1782 | return 1; |
1783 | return 0; | |
1784 | } | |
1785 | ||
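/*
 * User-space illustration of the test above with toy extents; the
 * demo_extent struct and max_len parameter are assumptions.  Extents
 * merge only when they have the same written/unwritten type, are
 * logically and physically contiguous, and the combined length fits.
 */
struct demo_extent {
	unsigned int lblk;		/* first logical block */
	unsigned int len;		/* number of blocks */
	unsigned long long pblk;	/* first physical block */
	int unwritten;
};

static int demo_can_merge(const struct demo_extent *a,
			  const struct demo_extent *b, unsigned int max_len)
{
	if (a->unwritten != b->unwritten)
		return 0;
	if (a->lblk + a->len != b->lblk)	/* logically contiguous? */
		return 0;
	if (a->len + b->len > max_len)		/* combined length limit */
		return 0;
	return a->pblk + a->len == b->pblk;	/* physically contiguous? */
}
/* e.g. {100, 8, 5000, 0} and {108, 4, 5008, 0} merge; {108, 4, 6000, 0} does not */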
56055d3a AA |
1786 | /* |
1787 | * This function tries to merge the "ex" extent with the next extent in the tree.
1788 | * It always tries to merge towards the right. If you want to merge towards
1789 | * the left, pass "ex - 1" as the argument instead of "ex".
1790 | * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns | |
1791 | * 1 if they got merged. | |
1792 | */ | |
197217a5 | 1793 | static int ext4_ext_try_to_merge_right(struct inode *inode, |
1f109d5a TT |
1794 | struct ext4_ext_path *path, |
1795 | struct ext4_extent *ex) | |
56055d3a AA |
1796 | { |
1797 | struct ext4_extent_header *eh; | |
1798 | unsigned int depth, len; | |
556615dc | 1799 | int merge_done = 0, unwritten; |
56055d3a AA |
1800 | |
1801 | depth = ext_depth(inode); | |
1802 | BUG_ON(path[depth].p_hdr == NULL); | |
1803 | eh = path[depth].p_hdr; | |
1804 | ||
1805 | while (ex < EXT_LAST_EXTENT(eh)) { | |
1806 | if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) | |
1807 | break; | |
1808 | /* merge with next extent! */ | |
556615dc | 1809 | unwritten = ext4_ext_is_unwritten(ex); |
56055d3a AA |
1810 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
1811 | + ext4_ext_get_actual_len(ex + 1)); | |
556615dc LC |
1812 | if (unwritten) |
1813 | ext4_ext_mark_unwritten(ex); | |
56055d3a AA |
1814 | |
1815 | if (ex + 1 < EXT_LAST_EXTENT(eh)) { | |
1816 | len = (EXT_LAST_EXTENT(eh) - ex - 1) | |
1817 | * sizeof(struct ext4_extent); | |
1818 | memmove(ex + 1, ex + 2, len); | |
1819 | } | |
e8546d06 | 1820 | le16_add_cpu(&eh->eh_entries, -1); |
56055d3a AA |
1821 | merge_done = 1; |
1822 | WARN_ON(eh->eh_entries == 0); | |
1823 | if (!eh->eh_entries) | |
24676da4 | 1824 | EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); |
56055d3a AA |
1825 | } |
1826 | ||
1827 | return merge_done; | |
1828 | } | |
1829 | ||
ecb94f5f TT |
1830 | /* |
1831 | * This function does a very simple check to see if we can collapse | |
1832 | * an extent tree with a single extent tree leaf block into the inode. | |
1833 | */ | |
1834 | static void ext4_ext_try_to_merge_up(handle_t *handle, | |
1835 | struct inode *inode, | |
1836 | struct ext4_ext_path *path) | |
1837 | { | |
1838 | size_t s; | |
1839 | unsigned max_root = ext4_ext_space_root(inode, 0); | |
1840 | ext4_fsblk_t blk; | |
1841 | ||
1842 | if ((path[0].p_depth != 1) || | |
1843 | (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || | |
1844 | (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) | |
1845 | return; | |
1846 | ||
1847 | /* | |
1848 | * We need to modify the block allocation bitmap and the block | |
1849 | * group descriptor to release the extent tree block. If we | |
1850 | * can't get the journal credits, give up. | |
1851 | */ | |
83448bdf JK |
1852 | if (ext4_journal_extend(handle, 2, |
1853 | ext4_free_metadata_revoke_credits(inode->i_sb, 1))) | |
ecb94f5f TT |
1854 | return; |
1855 | ||
1856 | /* | |
1857 | * Copy the extent data up to the inode | |
1858 | */ | |
1859 | blk = ext4_idx_pblock(path[0].p_idx); | |
1860 | s = le16_to_cpu(path[1].p_hdr->eh_entries) * | |
1861 | sizeof(struct ext4_extent_idx); | |
1862 | s += sizeof(struct ext4_extent_header); | |
1863 | ||
10809df8 | 1864 | path[1].p_maxdepth = path[0].p_maxdepth; |
ecb94f5f TT |
1865 | memcpy(path[0].p_hdr, path[1].p_hdr, s); |
1866 | path[0].p_depth = 0; | |
1867 | path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + | |
1868 | (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); | |
1869 | path[0].p_hdr->eh_max = cpu_to_le16(max_root); | |
1870 | ||
1871 | brelse(path[1].p_bh); | |
1872 | ext4_free_blocks(handle, inode, NULL, blk, 1, | |
71d4f7d0 | 1873 | EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
ecb94f5f TT |
1874 | } |
1875 | ||
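/*
 * Worked example for the copy size above: struct ext4_extent,
 * struct ext4_extent_idx and struct ext4_extent_header are all 12
 * bytes, which is why sizeof(struct ext4_extent_idx) is a safe unit
 * here even though the leaf holds extents.  A leaf with 3 entries
 * copies s = 3 * 12 + 12 = 48 bytes into the inode's i_data.
 */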
197217a5 | 1876 | /* |
adde81cf EB |
1877 | * This function tries to merge the @ex extent with its neighbours in the tree, then
1878 | * tries to collapse the extent tree into the inode. | |
197217a5 | 1879 | */ |
ecb94f5f TT |
1880 | static void ext4_ext_try_to_merge(handle_t *handle, |
1881 | struct inode *inode, | |
197217a5 | 1882 | struct ext4_ext_path *path, |
adde81cf EB |
1883 | struct ext4_extent *ex) |
1884 | { | |
197217a5 YY |
1885 | struct ext4_extent_header *eh; |
1886 | unsigned int depth; | |
1887 | int merge_done = 0; | |
197217a5 YY |
1888 | |
1889 | depth = ext_depth(inode); | |
1890 | BUG_ON(path[depth].p_hdr == NULL); | |
1891 | eh = path[depth].p_hdr; | |
1892 | ||
1893 | if (ex > EXT_FIRST_EXTENT(eh)) | |
1894 | merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); | |
1895 | ||
1896 | if (!merge_done) | |
ecb94f5f | 1897 | (void) ext4_ext_try_to_merge_right(inode, path, ex); |
197217a5 | 1898 | |
ecb94f5f | 1899 | ext4_ext_try_to_merge_up(handle, inode, path); |
197217a5 YY |
1900 | } |
1901 | ||
25d14f98 AA |
1902 | /* |
1903 | * check if a portion of the "newext" extent overlaps with an | |
1904 | * existing extent. | |
1905 | * | |
1906 | * If there is an overlap discovered, it updates the length of the newext | |
1907 | * such that there will be no overlap, and then returns 1. | |
1908 | * If there is no overlap found, it returns 0. | |
1909 | */ | |
4d33b1ef TT |
1910 | static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, |
1911 | struct inode *inode, | |
1f109d5a TT |
1912 | struct ext4_extent *newext, |
1913 | struct ext4_ext_path *path) | |
25d14f98 | 1914 | { |
725d26d3 | 1915 | ext4_lblk_t b1, b2; |
25d14f98 AA |
1916 | unsigned int depth, len1; |
1917 | unsigned int ret = 0; | |
1918 | ||
1919 | b1 = le32_to_cpu(newext->ee_block); | |
a2df2a63 | 1920 | len1 = ext4_ext_get_actual_len(newext); |
25d14f98 AA |
1921 | depth = ext_depth(inode); |
1922 | if (!path[depth].p_ext) | |
1923 | goto out; | |
f5a44db5 | 1924 | b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); |
25d14f98 AA |
1925 | |
1926 | /* | |
1927 | * get the next allocated block if the extent in the path | |
2b2d6d01 | 1928 | * is before the requested block(s) |
25d14f98 AA |
1929 | */ |
1930 | if (b2 < b1) { | |
1931 | b2 = ext4_ext_next_allocated_block(path); | |
f17722f9 | 1932 | if (b2 == EXT_MAX_BLOCKS) |
25d14f98 | 1933 | goto out; |
f5a44db5 | 1934 | b2 = EXT4_LBLK_CMASK(sbi, b2); |
25d14f98 AA |
1935 | } |
1936 | ||
725d26d3 | 1937 | /* check for wrap through zero on extent logical start block */
25d14f98 | 1938 | if (b1 + len1 < b1) { |
f17722f9 | 1939 | len1 = EXT_MAX_BLOCKS - b1; |
25d14f98 AA |
1940 | newext->ee_len = cpu_to_le16(len1); |
1941 | ret = 1; | |
1942 | } | |
1943 | ||
1944 | /* check for overlap */ | |
1945 | if (b1 + len1 > b2) { | |
1946 | newext->ee_len = cpu_to_le16(b2 - b1); | |
1947 | ret = 1; | |
1948 | } | |
1949 | out: | |
1950 | return ret; | |
1951 | } | |
1952 | ||
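/*
 * Toy numbers for the trim above: if newext covers logical blocks
 * 50..69 (b1 = 50, len1 = 20) and the next allocated block is b2 = 60,
 * then b1 + len1 > b2, so ee_len is cut to b2 - b1 = 10 blocks and 1
 * is returned; the new extent now ends just before the existing one.
 */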
a86c6181 | 1953 | /* |
d0d856e8 | 1954 | * ext4_ext_insert_extent: |
e4d7f2d3 | 1955 | * tries to merge the requested extent into an existing extent or
d0d856e8 RD |
1956 | * inserts the requested extent as a new one into the tree,
1957 | * creating a new leaf in the no-space case.
a86c6181 AT |
1958 | */ |
1959 | int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | |
dfe50809 | 1960 | struct ext4_ext_path **ppath, |
107a7bd3 | 1961 | struct ext4_extent *newext, int gb_flags) |
a86c6181 | 1962 | { |
dfe50809 | 1963 | struct ext4_ext_path *path = *ppath; |
af5bc92d | 1964 | struct ext4_extent_header *eh; |
a86c6181 AT |
1965 | struct ext4_extent *ex, *fex; |
1966 | struct ext4_extent *nearex; /* nearest extent */ | |
1967 | struct ext4_ext_path *npath = NULL; | |
725d26d3 AK |
1968 | int depth, len, err; |
1969 | ext4_lblk_t next; | |
556615dc | 1970 | int mb_flags = 0, unwritten; |
a86c6181 | 1971 | |
e3cf5d5d TT |
1972 | if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
1973 | mb_flags |= EXT4_MB_DELALLOC_RESERVED; | |
273df556 FM |
1974 | if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
1975 | EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); | |
6a797d27 | 1976 | return -EFSCORRUPTED; |
273df556 | 1977 | } |
a86c6181 AT |
1978 | depth = ext_depth(inode); |
1979 | ex = path[depth].p_ext; | |
be8981be | 1980 | eh = path[depth].p_hdr; |
273df556 FM |
1981 | if (unlikely(path[depth].p_hdr == NULL)) { |
1982 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | |
6a797d27 | 1983 | return -EFSCORRUPTED; |
273df556 | 1984 | } |
a86c6181 AT |
1985 | |
1986 | /* try to insert block into found extent and return */ | |
107a7bd3 | 1987 | if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { |
a2df2a63 AA |
1988 | |
1989 | /* | |
be8981be LC |
1990 | * Try to see whether we should rather test the extent to the
1991 | * right of ex, or the one to the left of ex. This is because
ed8a1a76 | 1992 | * ext4_find_extent() can return either the extent on the
be8981be LC |
1993 | * left or the one on the right of the searched position. This
1994 | * will make merging more effective.
a2df2a63 | 1995 | */ |
be8981be LC |
1996 | if (ex < EXT_LAST_EXTENT(eh) && |
1997 | (le32_to_cpu(ex->ee_block) + | |
1998 | ext4_ext_get_actual_len(ex) < | |
1999 | le32_to_cpu(newext->ee_block))) { | |
2000 | ex += 1; | |
2001 | goto prepend; | |
2002 | } else if ((ex > EXT_FIRST_EXTENT(eh)) && | |
2003 | (le32_to_cpu(newext->ee_block) + | |
2004 | ext4_ext_get_actual_len(newext) < | |
2005 | le32_to_cpu(ex->ee_block))) | |
2006 | ex -= 1; | |
2007 | ||
2008 | /* Try to append newex to the ex */ | |
2009 | if (ext4_can_extents_be_merged(inode, ex, newext)) { | |
70aa1554 | 2010 | ext_debug(inode, "append [%d]%d block to %u:[%d]%d" |
be8981be | 2011 | "(from %llu)\n", |
556615dc | 2012 | ext4_ext_is_unwritten(newext), |
be8981be LC |
2013 | ext4_ext_get_actual_len(newext), |
2014 | le32_to_cpu(ex->ee_block), | |
556615dc | 2015 | ext4_ext_is_unwritten(ex), |
be8981be LC |
2016 | ext4_ext_get_actual_len(ex), |
2017 | ext4_ext_pblock(ex)); | |
2018 | err = ext4_ext_get_access(handle, inode, | |
2019 | path + depth); | |
2020 | if (err) | |
2021 | return err; | |
556615dc | 2022 | unwritten = ext4_ext_is_unwritten(ex); |
be8981be | 2023 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
a2df2a63 | 2024 | + ext4_ext_get_actual_len(newext)); |
556615dc LC |
2025 | if (unwritten) |
2026 | ext4_ext_mark_unwritten(ex); | |
be8981be LC |
2027 | nearex = ex; |
2028 | goto merge; | |
2029 | } | |
2030 | ||
2031 | prepend: | |
2032 | /* Try to prepend newex to the ex */ | |
2033 | if (ext4_can_extents_be_merged(inode, newext, ex)) { | |
70aa1554 | 2034 | ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d" |
be8981be LC |
2035 | "(from %llu)\n", |
2036 | le32_to_cpu(newext->ee_block), | |
556615dc | 2037 | ext4_ext_is_unwritten(newext), |
be8981be LC |
2038 | ext4_ext_get_actual_len(newext), |
2039 | le32_to_cpu(ex->ee_block), | |
556615dc | 2040 | ext4_ext_is_unwritten(ex), |
be8981be LC |
2041 | ext4_ext_get_actual_len(ex), |
2042 | ext4_ext_pblock(ex)); | |
2043 | err = ext4_ext_get_access(handle, inode, | |
2044 | path + depth); | |
2045 | if (err) | |
2046 | return err; | |
2047 | ||
556615dc | 2048 | unwritten = ext4_ext_is_unwritten(ex); |
be8981be LC |
2049 | ex->ee_block = newext->ee_block; |
2050 | ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); | |
2051 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) | |
2052 | + ext4_ext_get_actual_len(newext)); | |
556615dc LC |
2053 | if (unwritten) |
2054 | ext4_ext_mark_unwritten(ex); | |
be8981be LC |
2055 | nearex = ex; |
2056 | goto merge; | |
2057 | } | |
a86c6181 AT |
2058 | } |
2059 | ||
a86c6181 AT |
2060 | depth = ext_depth(inode); |
2061 | eh = path[depth].p_hdr; | |
2062 | if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) | |
2063 | goto has_space; | |
2064 | ||
2065 | /* probably next leaf has space for us? */ | |
2066 | fex = EXT_LAST_EXTENT(eh); | |
598dbdf2 RD |
2067 | next = EXT_MAX_BLOCKS; |
2068 | if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) | |
5718789d | 2069 | next = ext4_ext_next_leaf_block(path); |
598dbdf2 | 2070 | if (next != EXT_MAX_BLOCKS) { |
70aa1554 | 2071 | ext_debug(inode, "next leaf block - %u\n", next); |
a86c6181 | 2072 | BUG_ON(npath != NULL); |
73c384c0 | 2073 | npath = ext4_find_extent(inode, next, NULL, gb_flags); |
a86c6181 AT |
2074 | if (IS_ERR(npath)) |
2075 | return PTR_ERR(npath); | |
2076 | BUG_ON(npath->p_depth != path->p_depth); | |
2077 | eh = npath[depth].p_hdr; | |
2078 | if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { | |
70aa1554 | 2079 | ext_debug(inode, "next leaf isn't full(%d)\n", |
a86c6181 AT |
2080 | le16_to_cpu(eh->eh_entries)); |
2081 | path = npath; | |
ffb505ff | 2082 | goto has_space; |
a86c6181 | 2083 | } |
70aa1554 | 2084 | ext_debug(inode, "next leaf has no free space(%d,%d)\n", |
a86c6181 AT |
2085 | le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
2086 | } | |
2087 | ||
2088 | /* | |
d0d856e8 RD |
2089 | * There is no free space in the found leaf. |
2090 | * We're gonna add a new leaf in the tree. | |
a86c6181 | 2091 | */ |
107a7bd3 | 2092 | if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) |
e3cf5d5d | 2093 | mb_flags |= EXT4_MB_USE_RESERVED; |
107a7bd3 | 2094 | err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, |
dfe50809 | 2095 | ppath, newext); |
a86c6181 AT |
2096 | if (err) |
2097 | goto cleanup; | |
2098 | depth = ext_depth(inode); | |
2099 | eh = path[depth].p_hdr; | |
2100 | ||
2101 | has_space: | |
2102 | nearex = path[depth].p_ext; | |
2103 | ||
7e028976 AM |
2104 | err = ext4_ext_get_access(handle, inode, path + depth); |
2105 | if (err) | |
a86c6181 AT |
2106 | goto cleanup; |
2107 | ||
2108 | if (!nearex) { | |
2109 | /* there is no extent in this leaf, create first one */ | |
70aa1554 | 2110 | ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n", |
8c55e204 | 2111 | le32_to_cpu(newext->ee_block), |
bf89d16f | 2112 | ext4_ext_pblock(newext), |
556615dc | 2113 | ext4_ext_is_unwritten(newext), |
a2df2a63 | 2114 | ext4_ext_get_actual_len(newext)); |
80e675f9 EG |
2115 | nearex = EXT_FIRST_EXTENT(eh); |
2116 | } else { | |
2117 | if (le32_to_cpu(newext->ee_block) | |
8c55e204 | 2118 | > le32_to_cpu(nearex->ee_block)) { |
80e675f9 | 2119 | /* Insert after */ |
70aa1554 | 2120 | ext_debug(inode, "insert %u:%llu:[%d]%d before: " |
32de6756 | 2121 | "nearest %p\n", |
80e675f9 EG |
2122 | le32_to_cpu(newext->ee_block), |
2123 | ext4_ext_pblock(newext), | |
556615dc | 2124 | ext4_ext_is_unwritten(newext), |
80e675f9 EG |
2125 | ext4_ext_get_actual_len(newext), |
2126 | nearex); | |
2127 | nearex++; | |
2128 | } else { | |
2129 | /* Insert before */ | |
2130 | BUG_ON(newext->ee_block == nearex->ee_block); | |
70aa1554 | 2131 | ext_debug(inode, "insert %u:%llu:[%d]%d after: " |
32de6756 | 2132 | "nearest %p\n", |
8c55e204 | 2133 | le32_to_cpu(newext->ee_block), |
bf89d16f | 2134 | ext4_ext_pblock(newext), |
556615dc | 2135 | ext4_ext_is_unwritten(newext), |
a2df2a63 | 2136 | ext4_ext_get_actual_len(newext), |
80e675f9 EG |
2137 | nearex); |
2138 | } | |
2139 | len = EXT_LAST_EXTENT(eh) - nearex + 1; | |
2140 | if (len > 0) { | |
70aa1554 | 2141 | ext_debug(inode, "insert %u:%llu:[%d]%d: " |
80e675f9 EG |
2142 | "move %d extents from 0x%p to 0x%p\n", |
2143 | le32_to_cpu(newext->ee_block), | |
2144 | ext4_ext_pblock(newext), | |
556615dc | 2145 | ext4_ext_is_unwritten(newext), |
80e675f9 EG |
2146 | ext4_ext_get_actual_len(newext), |
2147 | len, nearex, nearex + 1); | |
2148 | memmove(nearex + 1, nearex, | |
2149 | len * sizeof(struct ext4_extent)); | |
a86c6181 | 2150 | } |
a86c6181 AT |
2151 | } |
2152 | ||
e8546d06 | 2153 | le16_add_cpu(&eh->eh_entries, 1); |
80e675f9 | 2154 | path[depth].p_ext = nearex; |
a86c6181 | 2155 | nearex->ee_block = newext->ee_block; |
bf89d16f | 2156 | ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); |
a86c6181 | 2157 | nearex->ee_len = newext->ee_len; |
a86c6181 AT |
2158 | |
2159 | merge: | |
e7bcf823 | 2160 | /* try to merge extents */ |
107a7bd3 | 2161 | if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) |
ecb94f5f | 2162 | ext4_ext_try_to_merge(handle, inode, path, nearex); |
a86c6181 | 2163 | |
a86c6181 AT |
2164 | |
2165 | /* time to correct all indexes above */ | |
2166 | err = ext4_ext_correct_indexes(handle, inode, path); | |
2167 | if (err) | |
2168 | goto cleanup; | |
2169 | ||
ecb94f5f | 2170 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
a86c6181 AT |
2171 | |
2172 | cleanup: | |
b7ea89ad TT |
2173 | ext4_ext_drop_refs(npath); |
2174 | kfree(npath); | |
a86c6181 AT |
2175 | return err; |
2176 | } | |
2177 | ||
bb5835ed TT |
2178 | static int ext4_fill_es_cache_info(struct inode *inode, |
2179 | ext4_lblk_t block, ext4_lblk_t num, | |
2180 | struct fiemap_extent_info *fieinfo) | |
2181 | { | |
2182 | ext4_lblk_t next, end = block + num - 1; | |
2183 | struct extent_status es; | |
2184 | unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; | |
2185 | unsigned int flags; | |
2186 | int err; | |
2187 | ||
2188 | while (block <= end) { | |
2189 | next = 0; | |
2190 | flags = 0; | |
2191 | if (!ext4_es_lookup_extent(inode, block, &next, &es)) | |
2192 | break; | |
2193 | if (ext4_es_is_unwritten(&es)) | |
2194 | flags |= FIEMAP_EXTENT_UNWRITTEN; | |
2195 | if (ext4_es_is_delayed(&es)) | |
2196 | flags |= (FIEMAP_EXTENT_DELALLOC | | |
2197 | FIEMAP_EXTENT_UNKNOWN); | |
2198 | if (ext4_es_is_hole(&es)) | |
2199 | flags |= EXT4_FIEMAP_EXTENT_HOLE; | |
2200 | if (next == 0) | |
2201 | flags |= FIEMAP_EXTENT_LAST; | |
2202 | if (flags & (FIEMAP_EXTENT_DELALLOC| | |
2203 | EXT4_FIEMAP_EXTENT_HOLE)) | |
2204 | es.es_pblk = 0; | |
2205 | else | |
2206 | es.es_pblk = ext4_es_pblock(&es); | |
2207 | err = fiemap_fill_next_extent(fieinfo, | |
2208 | (__u64)es.es_lblk << blksize_bits, | |
2209 | (__u64)es.es_pblk << blksize_bits, | |
2210 | (__u64)es.es_len << blksize_bits, | |
2211 | flags); | |
2212 | if (next == 0) | |
2213 | break; | |
2214 | block = next; | |
2215 | if (err < 0) | |
2216 | return err; | |
2217 | if (err == 1) | |
2218 | return 0; | |
2219 | } | |
2220 | return 0; | |
2221 | } | |
2222 | ||
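/*
 * The shifts by blksize_bits above convert block units to bytes for
 * fiemap.  With 4 KiB blocks (blksize_bits = 12), an extent status at
 * logical block 16, physical block 8192, length 4 blocks is reported
 * as logical 65536, physical 33554432, length 16384 bytes.
 */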
2223 | ||
a86c6181 | 2224 | /* |
140a5250 JK |
2225 | * ext4_ext_determine_hole - determine hole around given block |
2226 | * @inode: inode we lookup in | |
2227 | * @path: path in extent tree to @lblk | |
2228 | * @lblk: pointer to logical block around which we want to determine hole | |
2229 | * | |
2230 | * Determine hole length (and start if easily possible) around given logical | |
2231 | * block. We don't try too hard to find the beginning of the hole, but since
2232 | * @path points to the extent just before @lblk, we provide it when it's easy.
2233 | * | |
2234 | * The function returns the length of a hole starting at @lblk. We update @lblk | |
2235 | * to the beginning of the hole if we managed to find it. | |
a86c6181 | 2236 | */ |
140a5250 JK |
2237 | static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, |
2238 | struct ext4_ext_path *path, | |
2239 | ext4_lblk_t *lblk) | |
a86c6181 AT |
2240 | { |
2241 | int depth = ext_depth(inode); | |
a86c6181 | 2242 | struct ext4_extent *ex; |
140a5250 | 2243 | ext4_lblk_t len; |
a86c6181 AT |
2244 | |
2245 | ex = path[depth].p_ext; | |
2246 | if (ex == NULL) { | |
2f8e0a7c | 2247 | /* there is no extent yet, so gap is [0;-] */ |
140a5250 | 2248 | *lblk = 0; |
2f8e0a7c | 2249 | len = EXT_MAX_BLOCKS; |
140a5250 JK |
2250 | } else if (*lblk < le32_to_cpu(ex->ee_block)) { |
2251 | len = le32_to_cpu(ex->ee_block) - *lblk; | |
2252 | } else if (*lblk >= le32_to_cpu(ex->ee_block) | |
a2df2a63 | 2253 | + ext4_ext_get_actual_len(ex)) { |
725d26d3 | 2254 | ext4_lblk_t next; |
725d26d3 | 2255 | |
140a5250 | 2256 | *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); |
725d26d3 | 2257 | next = ext4_ext_next_allocated_block(path); |
140a5250 JK |
2258 | BUG_ON(next == *lblk); |
2259 | len = next - *lblk; | |
a86c6181 | 2260 | } else { |
a86c6181 AT |
2261 | BUG(); |
2262 | } | |
140a5250 JK |
2263 | return len; |
2264 | } | |
a86c6181 | 2265 | |
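/*
 * User-space sketch (assumed demo_* names) of the three cases above:
 * an empty tree, @lblk before the extent, and @lblk after the extent.
 */
static unsigned int demo_hole_len(unsigned int *lblk, int have_ext,
				  unsigned int ee_block, unsigned int ee_len,
				  unsigned int next_alloc,
				  unsigned int max_blocks)
{
	if (!have_ext) {		/* no extent: hole spans the file */
		*lblk = 0;
		return max_blocks;
	}
	if (*lblk < ee_block)		/* hole ends where the extent begins */
		return ee_block - *lblk;
	/* hole starts right after the extent, runs to the next allocation */
	*lblk = ee_block + ee_len;
	return next_alloc - *lblk;
}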
140a5250 JK |
2266 | /* |
2267 | * ext4_ext_put_gap_in_cache: | |
2268 | * calculate boundaries of the gap that the requested block fits into | |
2269 | * and cache this gap | |
2270 | */ | |
2271 | static void | |
2272 | ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, | |
2273 | ext4_lblk_t hole_len) | |
2274 | { | |
2275 | struct extent_status es; | |
2276 | ||
ad431025 EW |
2277 | ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, |
2278 | hole_start + hole_len - 1, &es); | |
2f8e0a7c ZL |
2279 | if (es.es_len) { |
2280 | /* There's delayed extent containing lblock? */ | |
140a5250 | 2281 | if (es.es_lblk <= hole_start) |
2f8e0a7c | 2282 | return; |
140a5250 | 2283 | hole_len = min(es.es_lblk - hole_start, hole_len); |
2f8e0a7c | 2284 | } |
70aa1554 | 2285 | ext_debug(inode, " -> %u:%u\n", hole_start, hole_len); |
140a5250 JK |
2286 | ext4_es_insert_extent(inode, hole_start, hole_len, ~0, |
2287 | EXTENT_STATUS_HOLE); | |
a86c6181 AT |
2288 | } |
2289 | ||
2290 | /* | |
d0d856e8 RD |
2291 | * ext4_ext_rm_idx: |
2292 | * removes index from the index block. | |
a86c6181 | 2293 | */ |
1d03ec98 | 2294 | static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, |
c36575e6 | 2295 | struct ext4_ext_path *path, int depth) |
a86c6181 | 2296 | { |
a86c6181 | 2297 | int err; |
f65e6fba | 2298 | ext4_fsblk_t leaf; |
a86c6181 AT |
2299 | |
2300 | /* free index block */ | |
c36575e6 FL |
2301 | depth--; |
2302 | path = path + depth; | |
bf89d16f | 2303 | leaf = ext4_idx_pblock(path->p_idx); |
273df556 FM |
2304 | if (unlikely(path->p_hdr->eh_entries == 0)) { |
2305 | EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); | |
6a797d27 | 2306 | return -EFSCORRUPTED; |
273df556 | 2307 | } |
7e028976 AM |
2308 | err = ext4_ext_get_access(handle, inode, path); |
2309 | if (err) | |
a86c6181 | 2310 | return err; |
0e1147b0 RD |
2311 | |
2312 | if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { | |
2313 | int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; | |
2314 | len *= sizeof(struct ext4_extent_idx); | |
2315 | memmove(path->p_idx, path->p_idx + 1, len); | |
2316 | } | |
2317 | ||
e8546d06 | 2318 | le16_add_cpu(&path->p_hdr->eh_entries, -1); |
7e028976 AM |
2319 | err = ext4_ext_dirty(handle, inode, path); |
2320 | if (err) | |
a86c6181 | 2321 | return err; |
70aa1554 | 2322 | ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf); |
d8990240 AK |
2323 | trace_ext4_ext_rm_idx(inode, leaf); |
2324 | ||
7dc57615 | 2325 | ext4_free_blocks(handle, inode, NULL, leaf, 1, |
e6362609 | 2326 | EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
c36575e6 FL |
2327 | |
2328 | while (--depth >= 0) { | |
2329 | if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) | |
2330 | break; | |
2331 | path--; | |
2332 | err = ext4_ext_get_access(handle, inode, path); | |
2333 | if (err) | |
2334 | break; | |
2335 | path->p_idx->ei_block = (path+1)->p_idx->ei_block; | |
2336 | err = ext4_ext_dirty(handle, inode, path); | |
2337 | if (err) | |
2338 | break; | |
2339 | } | |
a86c6181 AT |
2340 | return err; |
2341 | } | |
2342 | ||
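/*
 * The memmove above closes the gap left by the removed index.  Toy
 * example: removing slot 1 from entries [A, B, C, D] moves C and D one
 * slot left, giving [A, C, D, D]; eh_entries drops from 4 to 3, so the
 * stale trailing copy of D is never looked at again.
 */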
2343 | /* | |
ee12b630 MC |
2344 | * ext4_ext_calc_credits_for_single_extent: |
2345 | * This routine returns the max. credits needed to insert an extent
2346 | * into the extent tree.
2347 | * When passing the actual path, the caller should calculate credits
2348 | * under i_data_sem. | |
a86c6181 | 2349 | */ |
525f4ed8 | 2350 | int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, |
a86c6181 AT |
2351 | struct ext4_ext_path *path) |
2352 | { | |
a86c6181 | 2353 | if (path) { |
ee12b630 | 2354 | int depth = ext_depth(inode); |
f3bd1f3f | 2355 | int ret = 0; |
ee12b630 | 2356 | |
a86c6181 | 2357 | /* probably there is space in leaf? */ |
a86c6181 | 2358 | if (le16_to_cpu(path[depth].p_hdr->eh_entries) |
ee12b630 | 2359 | < le16_to_cpu(path[depth].p_hdr->eh_max)) { |
a86c6181 | 2360 | |
ee12b630 MC |
2361 | /* |
2362 | * There is some space in the leaf tree, no
2363 | * need to account for the leaf block credit
2364 | * | |
2365 | * bitmaps and block group descriptor blocks | |
df3ab170 | 2366 | * and other metadata blocks still need to be |
ee12b630 MC |
2367 | * accounted. |
2368 | */ | |
525f4ed8 | 2369 | /* 1 bitmap, 1 block group descriptor */ |
ee12b630 | 2370 | ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); |
5887e98b | 2371 | return ret; |
ee12b630 MC |
2372 | } |
2373 | } | |
a86c6181 | 2374 | |
525f4ed8 | 2375 | return ext4_chunk_trans_blocks(inode, nrblocks); |
ee12b630 | 2376 | } |
a86c6181 | 2377 | |
ee12b630 | 2378 | /* |
fffb2739 | 2379 | * How many index/leaf blocks need to change/allocate to add @extents extents? |
ee12b630 | 2380 | * |
fffb2739 JK |
2381 | * If we add a single extent, then in the worst case, each tree level
2382 | * index/leaf needs to be changed in case of a tree split.
ee12b630 | 2383 | * |
fffb2739 JK |
2384 | * If more extents are inserted, they could cause the whole tree split more |
2385 | * than once, but this is really rare. | |
ee12b630 | 2386 | */ |
fffb2739 | 2387 | int ext4_ext_index_trans_blocks(struct inode *inode, int extents) |
ee12b630 MC |
2388 | { |
2389 | int index; | |
f19d5870 TM |
2390 | int depth; |
2391 | ||
2392 | /* If we are converting the inline data, only one is needed here. */ | |
2393 | if (ext4_has_inline_data(inode)) | |
2394 | return 1; | |
2395 | ||
2396 | depth = ext_depth(inode); | |
a86c6181 | 2397 | |
fffb2739 | 2398 | if (extents <= 1) |
ee12b630 MC |
2399 | index = depth * 2; |
2400 | else | |
2401 | index = depth * 3; | |
a86c6181 | 2402 | |
ee12b630 | 2403 | return index; |
a86c6181 AT |
2404 | } |
2405 | ||
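/*
 * Worked example for the sizing above: with a tree of depth 2, a
 * single inserted extent may split each level once, so index =
 * 2 * 2 = 4 index/leaf blocks; for multiple extents the estimate is
 * 3 per level (index = 2 * 3 = 6) to cover the rare double split.
 */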
981250ca TT |
2406 | static inline int get_default_free_blocks_flags(struct inode *inode) |
2407 | { | |
ddfa17e4 TE |
2408 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || |
2409 | ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) | |
981250ca TT |
2410 | return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; |
2411 | else if (ext4_should_journal_data(inode)) | |
2412 | return EXT4_FREE_BLOCKS_FORGET; | |
2413 | return 0; | |
2414 | } | |
2415 | ||
9fe67149 EW |
2416 | /* |
2417 | * ext4_rereserve_cluster - increment the reserved cluster count when | |
2418 | * freeing a cluster with a pending reservation | |
2419 | * | |
2420 | * @inode - file containing the cluster | |
2421 | * @lblk - logical block in cluster to be reserved | |
2422 | * | |
2423 | * Increments the reserved cluster count and adjusts quota in a bigalloc | |
2424 | * file system when freeing a partial cluster containing at least one | |
2425 | * delayed and unwritten block. A partial cluster meeting that | |
2426 | * requirement will have a pending reservation. If so, the | |
2427 | * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to | |
2428 | * defer reserved and allocated space accounting to a subsequent call | |
2429 | * to this function. | |
2430 | */ | |
2431 | static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) | |
2432 | { | |
2433 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | |
2434 | struct ext4_inode_info *ei = EXT4_I(inode); | |
2435 | ||
2436 | dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); | |
2437 | ||
2438 | spin_lock(&ei->i_block_reservation_lock); | |
2439 | ei->i_reserved_data_blocks++; | |
2440 | percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); | |
2441 | spin_unlock(&ei->i_block_reservation_lock); | |
2442 | ||
2443 | percpu_counter_add(&sbi->s_freeclusters_counter, 1); | |
2444 | ext4_remove_pending(inode, lblk); | |
2445 | } | |
2446 | ||
a86c6181 | 2447 | static int ext4_remove_blocks(handle_t *handle, struct inode *inode, |
0aa06000 | 2448 | struct ext4_extent *ex, |
9fe67149 | 2449 | struct partial_cluster *partial, |
0aa06000 | 2450 | ext4_lblk_t from, ext4_lblk_t to) |
a86c6181 | 2451 | { |
0aa06000 | 2452 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
345ee947 | 2453 | unsigned short ee_len = ext4_ext_get_actual_len(ex); |
9fe67149 EW |
2454 | ext4_fsblk_t last_pblk, pblk; |
2455 | ext4_lblk_t num; | |
2456 | int flags; | |
2457 | ||
2458 | /* only extent tail removal is allowed */ | |
2459 | if (from < le32_to_cpu(ex->ee_block) || | |
2460 | to != le32_to_cpu(ex->ee_block) + ee_len - 1) { | |
2461 | ext4_error(sbi->s_sb, | |
2462 | "strange request: removal(2) %u-%u from %u:%u", | |
2463 | from, to, le32_to_cpu(ex->ee_block), ee_len); | |
2464 | return 0; | |
2465 | } | |
2466 | ||
2467 | #ifdef EXTENTS_STATS | |
2468 | spin_lock(&sbi->s_ext_stats_lock); | |
2469 | sbi->s_ext_blocks += ee_len; | |
2470 | sbi->s_ext_extents++; | |
2471 | if (ee_len < sbi->s_ext_min) | |
2472 | sbi->s_ext_min = ee_len; | |
2473 | if (ee_len > sbi->s_ext_max) | |
2474 | sbi->s_ext_max = ee_len; | |
2475 | if (ext_depth(inode) > sbi->s_depth_max) | |
2476 | sbi->s_depth_max = ext_depth(inode); | |
2477 | spin_unlock(&sbi->s_ext_stats_lock); | |
2478 | #endif | |
2479 | ||
2480 | trace_ext4_remove_blocks(inode, ex, from, to, partial); | |
18888cf0 | 2481 | |
0aa06000 | 2482 | /* |
9fe67149 EW |
2483 | * if we have a partial cluster, and it's different from the |
2484 | * cluster of the last block in the extent, we free it | |
0aa06000 | 2485 | */ |
9fe67149 EW |
2486 | last_pblk = ext4_ext_pblock(ex) + ee_len - 1; |
2487 | ||
2488 | if (partial->state != initial && | |
2489 | partial->pclu != EXT4_B2C(sbi, last_pblk)) { | |
2490 | if (partial->state == tofree) { | |
2491 | flags = get_default_free_blocks_flags(inode); | |
2492 | if (ext4_is_pending(inode, partial->lblk)) | |
2493 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
2494 | ext4_free_blocks(handle, inode, NULL, | |
2495 | EXT4_C2B(sbi, partial->pclu), | |
2496 | sbi->s_cluster_ratio, flags); | |
2497 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) | |
2498 | ext4_rereserve_cluster(inode, partial->lblk); | |
2499 | } | |
2500 | partial->state = initial; | |
2501 | } | |
2502 | ||
2503 | num = le32_to_cpu(ex->ee_block) + ee_len - from; | |
2504 | pblk = ext4_ext_pblock(ex) + ee_len - num; | |
0aa06000 TT |
2505 | |
2506 | /* | |
9fe67149 EW |
2507 | * We free the partial cluster at the end of the extent (if any), |
2508 | * unless the cluster is used by another extent (partial_cluster | |
2509 | * state is nofree). If a partial cluster exists here, it must be | |
2510 | * shared with the last block in the extent. | |
0aa06000 | 2511 | */ |
9fe67149 EW |
2512 | flags = get_default_free_blocks_flags(inode); |
2513 | ||
2514 | /* partial, left end cluster aligned, right end unaligned */ | |
2515 | if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) && | |
2516 | (EXT4_LBLK_CMASK(sbi, to) >= from) && | |
2517 | (partial->state != nofree)) { | |
2518 | if (ext4_is_pending(inode, to)) | |
2519 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
0aa06000 | 2520 | ext4_free_blocks(handle, inode, NULL, |
9fe67149 | 2521 | EXT4_PBLK_CMASK(sbi, last_pblk), |
0aa06000 | 2522 | sbi->s_cluster_ratio, flags); |
9fe67149 EW |
2523 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) |
2524 | ext4_rereserve_cluster(inode, to); | |
2525 | partial->state = initial; | |
2526 | flags = get_default_free_blocks_flags(inode); | |
0aa06000 TT |
2527 | } |
2528 | ||
9fe67149 | 2529 | flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; |
d23142c6 | 2530 | |
9fe67149 EW |
2531 | /* |
2532 | * For bigalloc file systems, we never free a partial cluster | |
2533 | * at the beginning of the extent. Instead, we check to see if we | |
2534 | * need to free it on a subsequent call to ext4_remove_blocks, | |
2535 | * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. | |
2536 | */ | |
2537 | flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; | |
2538 | ext4_free_blocks(handle, inode, NULL, pblk, num, flags); | |
2539 | ||
2540 | /* reset the partial cluster if we've freed past it */ | |
2541 | if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk)) | |
2542 | partial->state = initial; | |
2543 | ||
2544 | /* | |
2545 | * If we've freed the entire extent but the beginning is not left | |
2546 | * cluster aligned and is not marked as ineligible for freeing we | |
2547 | * record the partial cluster at the beginning of the extent. It | |
2548 | * wasn't freed by the preceding ext4_free_blocks() call, and we | |
2549 | * need to look farther to the left to determine if it's to be freed | |
2550 | * (not shared with another extent). Else, reset the partial | |
2551 | * cluster - we're either done freeing or the beginning of the | |
2552 | * extent is left cluster aligned. | |
2553 | */ | |
2554 | if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { | |
2555 | if (partial->state == initial) { | |
2556 | partial->pclu = EXT4_B2C(sbi, pblk); | |
2557 | partial->lblk = from; | |
2558 | partial->state = tofree; | |
345ee947 | 2559 | } |
9fe67149 EW |
2560 | } else { |
2561 | partial->state = initial; | |
2562 | } | |
2563 | ||
a86c6181 AT |
2564 | return 0; |
2565 | } | |
2566 | ||
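/*
 * Cluster arithmetic used above, with toy numbers: on a bigalloc
 * file system with s_cluster_ratio = 16, EXT4_LBLK_COFF(sbi, lblk)
 * is lblk % 16 and EXT4_LBLK_CMASK(sbi, lblk) is lblk & ~15.  For
 * to = 29, COFF is 13 (not 15, so the right end is unaligned) and
 * CMASK is 16; if from <= 16, the whole physical cluster holding
 * last_pblk is freed separately before the block range itself.
 */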
d583fb87 AH |
2567 | /* |
2568 | * ext4_ext_rm_leaf() Removes the extents associated with the
5bf43760 EW |
2569 | * blocks appearing between "start" and "end". Both "start"
2570 | * and "end" must appear in the same extent or -EFSCORRUPTED is returned.
d583fb87 AH |
2571 | *
2572 | * @handle: The journal handle
2573 | * @inode: The file's inode
2574 | * @path: The path to the leaf
d23142c6 | 2575 | * @partial: The cluster which we'll have to free if all extents
5bf43760 EW |
2576 | * have been released from it. However, if its state is
2577 | * nofree, it's a cluster just to the right of the
2578 | * punched region and it must not be freed.
d583fb87 AH |
2579 | * @start: The first block to remove |
2580 | * @end: The last block to remove | |
2581 | */ | |
a86c6181 AT |
2582 | static int |
2583 | ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |
d23142c6 | 2584 | struct ext4_ext_path *path, |
9fe67149 | 2585 | struct partial_cluster *partial, |
0aa06000 | 2586 | ext4_lblk_t start, ext4_lblk_t end) |
a86c6181 | 2587 | { |
0aa06000 | 2588 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
a86c6181 | 2589 | int err = 0, correct_index = 0; |
83448bdf | 2590 | int depth = ext_depth(inode), credits, revoke_credits; |
a86c6181 | 2591 | struct ext4_extent_header *eh; |
750c9c47 | 2592 | ext4_lblk_t a, b; |
725d26d3 AK |
2593 | unsigned num; |
2594 | ext4_lblk_t ex_ee_block; | |
a86c6181 | 2595 | unsigned short ex_ee_len; |
556615dc | 2596 | unsigned unwritten = 0; |
a86c6181 | 2597 | struct ext4_extent *ex; |
d23142c6 | 2598 | ext4_fsblk_t pblk; |
a86c6181 | 2599 | |
c29c0ae7 | 2600 | /* the header must be checked already in ext4_ext_remove_space() */ |
70aa1554 | 2601 | ext_debug(inode, "truncate since %u in leaf to %u\n", start, end); |
a86c6181 AT |
2602 | if (!path[depth].p_hdr) |
2603 | path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); | |
2604 | eh = path[depth].p_hdr; | |
273df556 FM |
2605 | if (unlikely(path[depth].p_hdr == NULL)) { |
2606 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | |
6a797d27 | 2607 | return -EFSCORRUPTED; |
273df556 | 2608 | } |
a86c6181 | 2609 | /* find where to start removing */ |
6ae06ff5 AS |
2610 | ex = path[depth].p_ext; |
2611 | if (!ex) | |
2612 | ex = EXT_LAST_EXTENT(eh); | |
a86c6181 AT |
2613 | |
2614 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
a2df2a63 | 2615 | ex_ee_len = ext4_ext_get_actual_len(ex); |
a86c6181 | 2616 | |
9fe67149 | 2617 | trace_ext4_ext_rm_leaf(inode, start, ex, partial); |
d8990240 | 2618 | |
a86c6181 AT |
2619 | while (ex >= EXT_FIRST_EXTENT(eh) && |
2620 | ex_ee_block + ex_ee_len > start) { | |
a41f2071 | 2621 | |
556615dc LC |
2622 | if (ext4_ext_is_unwritten(ex)) |
2623 | unwritten = 1; | |
a41f2071 | 2624 | else |
556615dc | 2625 | unwritten = 0; |
a41f2071 | 2626 | |
70aa1554 | 2627 | ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block, |
556615dc | 2628 | unwritten, ex_ee_len); |
a86c6181 AT |
2629 | path[depth].p_ext = ex; |
2630 | ||
2631 | a = ex_ee_block > start ? ex_ee_block : start; | |
d583fb87 AH |
2632 | b = ex_ee_block+ex_ee_len - 1 < end ? |
2633 | ex_ee_block+ex_ee_len - 1 : end; | |
a86c6181 | 2634 | |
70aa1554 | 2635 | ext_debug(inode, " border %u:%u\n", a, b); |
a86c6181 | 2636 | |
d583fb87 | 2637 | /* If this extent is beyond the end of the hole, skip it */ |
5f95d21f | 2638 | if (end < ex_ee_block) { |
d23142c6 LC |
2639 | /* |
2640 | * We're going to skip this extent and move to another, | |
f4226d9e EW |
2641 | * so note that its first cluster is in use to avoid |
2642 | * freeing it when removing blocks. Eventually, the | |
2643 | * right edge of the truncated/punched region will | |
2644 | * be just to the left. | |
d23142c6 | 2645 | */ |
f4226d9e EW |
2646 | if (sbi->s_cluster_ratio > 1) { |
2647 | pblk = ext4_ext_pblock(ex); | |
9fe67149 EW |
2648 | partial->pclu = EXT4_B2C(sbi, pblk); |
2649 | partial->state = nofree; | |
f4226d9e | 2650 | } |
d583fb87 AH |
2651 | ex--; |
2652 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
2653 | ex_ee_len = ext4_ext_get_actual_len(ex); | |
2654 | continue; | |
750c9c47 | 2655 | } else if (b != ex_ee_block + ex_ee_len - 1) { |
dc1841d6 LC |
2656 | EXT4_ERROR_INODE(inode, |
2657 | "can not handle truncate %u:%u " | |
2658 | "on extent %u:%u", | |
2659 | start, end, ex_ee_block, | |
2660 | ex_ee_block + ex_ee_len - 1); | |
6a797d27 | 2661 | err = -EFSCORRUPTED; |
750c9c47 | 2662 | goto out; |
a86c6181 AT |
2663 | } else if (a != ex_ee_block) { |
2664 | /* remove tail of the extent */ | |
750c9c47 | 2665 | num = a - ex_ee_block; |
a86c6181 AT |
2666 | } else { |
2667 | /* remove whole extent: excellent! */ | |
a86c6181 | 2668 | num = 0; |
a86c6181 | 2669 | } |
34071da7 TT |
2670 | /* |
2671 | * 3 for leaf, sb, and inode plus 2 (bmap and group | |
2672 | * descriptor) for each block group; assume two block | |
2673 | * groups plus ex_ee_len/blocks_per_block_group for | |
2674 | * the worst case | |
2675 | */ | |
2676 | credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); | |
a86c6181 AT |
2677 | if (ex == EXT_FIRST_EXTENT(eh)) { |
2678 | correct_index = 1; | |
2679 | credits += (ext_depth(inode)) + 1; | |
2680 | } | |
5aca07eb | 2681 | credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
83448bdf JK |
2682 | /* |
2683 | * We may end up freeing some index blocks and data from the | |
2684 | * punched range. Note that partial clusters are accounted for | |
2685 | * by ext4_free_data_revoke_credits(). | |
2686 | */ | |
2687 | revoke_credits = | |
2688 | ext4_free_metadata_revoke_credits(inode->i_sb, | |
2689 | ext_depth(inode)) + | |
2690 | ext4_free_data_revoke_credits(inode, b - a + 1); | |
a86c6181 | 2691 | |
a4130367 | 2692 | err = ext4_datasem_ensure_credits(handle, inode, credits, |
83448bdf | 2693 | credits, revoke_credits); |
a4130367 JK |
2694 | if (err) { |
2695 | if (err > 0) | |
2696 | err = -EAGAIN; | |
a86c6181 | 2697 | goto out; |
a4130367 | 2698 | } |
a86c6181 AT |
2699 | |
2700 | err = ext4_ext_get_access(handle, inode, path + depth); | |
2701 | if (err) | |
2702 | goto out; | |
2703 | ||
9fe67149 | 2704 | err = ext4_remove_blocks(handle, inode, ex, partial, a, b); |
a86c6181 AT |
2705 | if (err) |
2706 | goto out; | |
2707 | ||
750c9c47 | 2708 | if (num == 0) |
d0d856e8 | 2709 | /* this extent is removed; mark slot entirely unused */ |
f65e6fba | 2710 | ext4_ext_store_pblock(ex, 0); |
a86c6181 | 2711 | |
a86c6181 | 2712 | ex->ee_len = cpu_to_le16(num); |
749269fa | 2713 | /* |
556615dc | 2714 | * Do not mark unwritten if all the blocks in the |
749269fa AA |
2715 | * extent have been removed. |
2716 | */ | |
556615dc LC |
2717 | if (unwritten && num) |
2718 | ext4_ext_mark_unwritten(ex); | |
d583fb87 AH |
2719 | /* |
2720 | * If the extent was completely released, | |
2721 | * we need to remove it from the leaf | |
2722 | */ | |
2723 | if (num == 0) { | |
f17722f9 | 2724 | if (end != EXT_MAX_BLOCKS - 1) { |
d583fb87 AH |
2725 | /* |
2726 | * For hole punching, we need to scoot all the | |
2727 | * extents up when an extent is removed so that | |
2728 | * we don't have blank extents in the middle
2729 | */ | |
2730 | memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * | |
2731 | sizeof(struct ext4_extent)); | |
2732 | ||
2733 | /* Now get rid of the one at the end */ | |
2734 | memset(EXT_LAST_EXTENT(eh), 0, | |
2735 | sizeof(struct ext4_extent)); | |
2736 | } | |
2737 | le16_add_cpu(&eh->eh_entries, -1); | |
5bf43760 | 2738 | } |
d583fb87 | 2739 | |
750c9c47 DM |
2740 | err = ext4_ext_dirty(handle, inode, path + depth); |
2741 | if (err) | |
2742 | goto out; | |
2743 | ||
70aa1554 | 2744 | ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num, |
bf89d16f | 2745 | ext4_ext_pblock(ex)); |
a86c6181 AT |
2746 | ex--; |
2747 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
a2df2a63 | 2748 | ex_ee_len = ext4_ext_get_actual_len(ex); |
a86c6181 AT |
2749 | } |
2750 | ||
2751 | if (correct_index && eh->eh_entries) | |
2752 | err = ext4_ext_correct_indexes(handle, inode, path); | |
2753 | ||
0aa06000 | 2754 | /* |
ad6599ab EW |
2755 | * If there's a partial cluster and at least one extent remains in |
2756 | * the leaf, free the partial cluster if it isn't shared with the | |
5bf43760 | 2757 | * current extent. If it is shared with the current extent |
9fe67149 | 2758 | * we reset the partial cluster because we've reached the start of the |
5bf43760 | 2759 | * truncated/punched region and we're done removing blocks. |
0aa06000 | 2760 | */ |
9fe67149 | 2761 | if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { |
5bf43760 | 2762 | pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; |
9fe67149 EW |
2763 | if (partial->pclu != EXT4_B2C(sbi, pblk)) { |
2764 | int flags = get_default_free_blocks_flags(inode); | |
2765 | ||
2766 | if (ext4_is_pending(inode, partial->lblk)) | |
2767 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
5bf43760 | 2768 | ext4_free_blocks(handle, inode, NULL, |
9fe67149 EW |
2769 | EXT4_C2B(sbi, partial->pclu), |
2770 | sbi->s_cluster_ratio, flags); | |
2771 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) | |
2772 | ext4_rereserve_cluster(inode, partial->lblk); | |
5bf43760 | 2773 | } |
9fe67149 | 2774 | partial->state = initial; |
0aa06000 TT |
2775 | } |
2776 | ||
a86c6181 AT |
2777 | /* if this leaf is free, then we should |
2778 | * remove it from the index block above */
2779 | if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) | |
c36575e6 | 2780 | err = ext4_ext_rm_idx(handle, inode, path, depth); |
a86c6181 AT |
2781 | |
2782 | out: | |
2783 | return err; | |
2784 | } | |
2785 | ||
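/*
 * Trim window arithmetic above, with toy numbers: for an extent
 * covering blocks 100..119 and a punch of 110..200, a = max(100, 110)
 * = 110 and b = min(119, 200) = 119, so num = a - ex_ee_block = 10
 * blocks remain in the extent and the tail 110..119 is freed.
 */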
2786 | /* | |
d0d856e8 RD |
2787 | * ext4_ext_more_to_rm: |
2788 | * returns 1 if current index has to be freed (even partial) | |
a86c6181 | 2789 | */ |
09b88252 | 2790 | static int |
a86c6181 AT |
2791 | ext4_ext_more_to_rm(struct ext4_ext_path *path) |
2792 | { | |
2793 | BUG_ON(path->p_idx == NULL); | |
2794 | ||
2795 | if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) | |
2796 | return 0; | |
2797 | ||
2798 | /* | |
d0d856e8 | 2799 | * if truncate on deeper level happened, it wasn't partial, |
a86c6181 AT |
2800 | * so we have to consider current index for truncation |
2801 | */ | |
2802 | if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) | |
2803 | return 0; | |
2804 | return 1; | |
2805 | } | |
2806 | ||
26a4c0c6 TT |
2807 | int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, |
2808 | ext4_lblk_t end) | |
a86c6181 | 2809 | { |
f4226d9e | 2810 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
a86c6181 | 2811 | int depth = ext_depth(inode); |
968dee77 | 2812 | struct ext4_ext_path *path = NULL; |
9fe67149 | 2813 | struct partial_cluster partial; |
a86c6181 | 2814 | handle_t *handle; |
6f2080e6 | 2815 | int i = 0, err = 0; |
a86c6181 | 2816 | |
9fe67149 EW |
2817 | partial.pclu = 0; |
2818 | partial.lblk = 0; | |
2819 | partial.state = initial; | |
2820 | ||
70aa1554 | 2821 | ext_debug(inode, "truncate since %u to %u\n", start, end); |
a86c6181 AT |
2822 | |
2823 | /* probably first extent we're gonna free will be last in block */ | |
83448bdf JK |
2824 | handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, |
2825 | depth + 1, | |
2826 | ext4_free_metadata_revoke_credits(inode->i_sb, depth)); | |
a86c6181 AT |
2827 | if (IS_ERR(handle)) |
2828 | return PTR_ERR(handle); | |
2829 | ||
0617b83f | 2830 | again: |
61801325 | 2831 | trace_ext4_ext_remove_space(inode, start, end, depth); |
d8990240 | 2832 | |
5f95d21f LC |
2833 | /* |
2834 | * Check if we are removing extents inside the extent tree. If that | |
2835 | * is the case, we are going to punch a hole inside the extent tree | |
2836 | * so we have to check whether we need to split the extent covering | |
2837 | * the last block to remove so we can easily remove the part of it | |
2838 | * in ext4_ext_rm_leaf(). | |
2839 | */ | |
2840 | if (end < EXT_MAX_BLOCKS - 1) { | |
2841 | struct ext4_extent *ex; | |
f4226d9e EW |
2842 | ext4_lblk_t ee_block, ex_end, lblk; |
2843 | ext4_fsblk_t pblk; | |
5f95d21f | 2844 | |
f4226d9e | 2845 | /* find extent for or closest extent to this block */ |
73c384c0 TT |
2846 | path = ext4_find_extent(inode, end, NULL, |
2847 | EXT4_EX_NOCACHE | EXT4_EX_NOFAIL); | |
5f95d21f LC |
2848 | if (IS_ERR(path)) { |
2849 | ext4_journal_stop(handle); | |
2850 | return PTR_ERR(path); | |
2851 | } | |
2852 | depth = ext_depth(inode); | |
6f2080e6 | 2853 | /* Leaf may not exist only if the inode has no blocks at all */ |
5f95d21f | 2854 | ex = path[depth].p_ext; |
968dee77 | 2855 | if (!ex) { |
6f2080e6 DM |
2856 | if (depth) { |
2857 | EXT4_ERROR_INODE(inode, | |
2858 | "path[%d].p_hdr == NULL", | |
2859 | depth); | |
6a797d27 | 2860 | err = -EFSCORRUPTED; |
6f2080e6 DM |
2861 | } |
2862 | goto out; | |
968dee77 | 2863 | } |
5f95d21f LC |
2864 | |
2865 | ee_block = le32_to_cpu(ex->ee_block); | |
f4226d9e | 2866 | ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; |
5f95d21f LC |
2867 | |
2868 | /* | |
2869 | * See if the last block is inside the extent, if so split | |
2870 | * the extent at 'end' block so we can easily remove the | |
2871 | * tail of the first part of the split extent in | |
2872 | * ext4_ext_rm_leaf(). | |
2873 | */ | |
f4226d9e EW |
2874 | if (end >= ee_block && end < ex_end) { |
2875 | ||
2876 | /* | |
2877 | * If we're going to split the extent, note that | |
2878 | * the cluster containing the block after 'end' is | |
2879 | * in use to avoid freeing it when removing blocks. | |
2880 | */ | |
2881 | if (sbi->s_cluster_ratio > 1) { | |
cfb3c85a | 2882 | pblk = ext4_ext_pblock(ex) + end - ee_block + 1; |
9fe67149 EW |
2883 | partial.pclu = EXT4_B2C(sbi, pblk); |
2884 | partial.state = nofree; | |
f4226d9e EW |
2885 | } |
2886 | ||
5f95d21f LC |
2887 | /* |
2888 | * Split the extent in two so that 'end' is the last | |
27dd4385 LC |
2889 | * block in the first new extent. Also we should not |
2890 | * fail removing space due to ENOSPC so try to use | |
2891 | * reserved block if that happens. | |
5f95d21f | 2892 | */ |
dfe50809 | 2893 | err = ext4_force_split_extent_at(handle, inode, &path, |
fcf6b1b7 | 2894 | end + 1, 1); |
5f95d21f LC |
2895 | if (err < 0) |
2896 | goto out; | |
f4226d9e | 2897 | |
7bd75230 EW |
2898 | } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && |
2899 | partial.state == initial) { | |
f4226d9e | 2900 | /* |
7bd75230 EW |
2901 | * If we're punching, there's an extent to the right. |
2902 | * If the partial cluster hasn't been set, set it to | |
2903 | * that extent's first cluster and its state to nofree | |
2904 | * so it won't be freed should it contain blocks to be | |
2905 | * removed. If it's already set (tofree/nofree), we're | |
2906 | * retrying and keep the original partial cluster info | |
2907 | * so a cluster marked tofree as a result of earlier | |
2908 | * extent removal is not lost. | |
f4226d9e EW |
2909 | */ |
2910 | lblk = ex_end + 1; | |
2911 | err = ext4_ext_search_right(inode, path, &lblk, &pblk, | |
d7dce9e0 | 2912 | NULL); |
2913 | if (err < 0) | |
f4226d9e | 2914 | goto out; |
9fe67149 EW |
2915 | if (pblk) { |
2916 | partial.pclu = EXT4_B2C(sbi, pblk); | |
2917 | partial.state = nofree; | |
2918 | } | |
5f95d21f | 2919 | } |
5f95d21f | 2920 | } |
a86c6181 | 2921 | /* |
d0d856e8 RD |
2922 | * We start scanning from the right side, freeing all the blocks |
2923 | * after i_size and walking into the tree depth-wise. | |
a86c6181 | 2924 | */ |
0617b83f | 2925 | depth = ext_depth(inode); |
968dee77 AS |
2926 | if (path) { |
2927 | int k = i = depth; | |
2928 | while (--k > 0) | |
2929 | path[k].p_block = | |
2930 | le16_to_cpu(path[k].p_hdr->eh_entries)+1; | |
2931 | } else { | |
6396bb22 | 2932 | path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), |
73c384c0 | 2933 | GFP_NOFS | __GFP_NOFAIL); |
968dee77 AS |
2934 | if (path == NULL) { |
2935 | ext4_journal_stop(handle); | |
2936 | return -ENOMEM; | |
2937 | } | |
10809df8 | 2938 | path[0].p_maxdepth = path[0].p_depth = depth; |
968dee77 | 2939 | path[0].p_hdr = ext_inode_hdr(inode); |
89a4e48f | 2940 | i = 0; |
5f95d21f | 2941 | |
c349179b | 2942 | if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { |
6a797d27 | 2943 | err = -EFSCORRUPTED; |
968dee77 AS |
2944 | goto out; |
2945 | } | |
a86c6181 | 2946 | } |
968dee77 | 2947 | err = 0; |
a86c6181 AT |
2948 | |
2949 | while (i >= 0 && err == 0) { | |
2950 | if (i == depth) { | |
2951 | /* this is leaf block */ | |
d583fb87 | 2952 | err = ext4_ext_rm_leaf(handle, inode, path, |
9fe67149 | 2953 | &partial, start, end); |
d0d856e8 | 2954 | /* root level has p_bh == NULL, brelse() eats this */ |
a86c6181 AT |
2955 | brelse(path[i].p_bh); |
2956 | path[i].p_bh = NULL; | |
2957 | i--; | |
2958 | continue; | |
2959 | } | |
2960 | ||
2961 | /* this is index block */ | |
2962 | if (!path[i].p_hdr) { | |
70aa1554 | 2963 | ext_debug(inode, "initialize header\n"); |
a86c6181 | 2964 | path[i].p_hdr = ext_block_hdr(path[i].p_bh); |
a86c6181 AT |
2965 | } |
2966 | ||
a86c6181 | 2967 | if (!path[i].p_idx) { |
d0d856e8 | 2968 | /* this level hasn't been touched yet */ |
a86c6181 AT |
2969 | path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); |
2970 | path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; | |
70aa1554 | 2971 | ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n", |
a86c6181 AT |
2972 | path[i].p_hdr, |
2973 | le16_to_cpu(path[i].p_hdr->eh_entries)); | |
2974 | } else { | |
d0d856e8 | 2975 | /* we were already here, go to the next index */ |
a86c6181 AT |
2976 | path[i].p_idx--; |
2977 | } | |
2978 | ||
70aa1554 | 2979 | ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n", |
a86c6181 AT |
2980 | i, EXT_FIRST_INDEX(path[i].p_hdr), |
2981 | path[i].p_idx); | |
2982 | if (ext4_ext_more_to_rm(path + i)) { | |
c29c0ae7 | 2983 | struct buffer_head *bh; |
a86c6181 | 2984 | /* go to the next level */ |
70aa1554 | 2985 | ext_debug(inode, "move to level %d (block %llu)\n", |
bf89d16f | 2986 | i + 1, ext4_idx_pblock(path[i].p_idx)); |
a86c6181 | 2987 | memset(path + i + 1, 0, sizeof(*path)); |
9c6e0719 ZY |
2988 | bh = read_extent_tree_block(inode, path[i].p_idx, |
2989 | depth - i - 1, | |
2990 | EXT4_EX_NOCACHE); | |
7d7ea89e | 2991 | if (IS_ERR(bh)) { |
a86c6181 | 2992 | /* should we reset i_size? */ |
7d7ea89e | 2993 | err = PTR_ERR(bh); |
a86c6181 AT |
2994 | break; |
2995 | } | |
76828c88 TT |
2996 | /* Yield here to deal with large extent trees. |
2997 | * Should be a no-op if we did IO above. */ | |
2998 | cond_resched(); | |
c29c0ae7 | 2999 | if (WARN_ON(i + 1 > depth)) { |
6a797d27 | 3000 | err = -EFSCORRUPTED; |
c29c0ae7 AT |
3001 | break; |
3002 | } | |
3003 | path[i + 1].p_bh = bh; | |
a86c6181 | 3004 | |
d0d856e8 RD |
3005 | /* save actual number of indexes since this |
3006 | * number is changed at the next iteration */ | |
a86c6181 AT |
3007 | path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); |
3008 | i++; | |
3009 | } else { | |
d0d856e8 | 3010 | /* we finished processing this index, go up */ |
a86c6181 | 3011 | if (path[i].p_hdr->eh_entries == 0 && i > 0) { |
d0d856e8 | 3012 | /* index is empty, remove it; |
a86c6181 AT |
3013 | * handle must already be prepared by |
3014 | * the leaf truncation code */ |
c36575e6 | 3015 | err = ext4_ext_rm_idx(handle, inode, path, i); |
a86c6181 | 3016 | } |
d0d856e8 | 3017 | /* root level has p_bh == NULL, brelse() eats this */ |
a86c6181 AT |
3018 | brelse(path[i].p_bh); |
3019 | path[i].p_bh = NULL; | |
3020 | i--; | |
70aa1554 | 3021 | ext_debug(inode, "return to level %d\n", i); |
a86c6181 AT |
3022 | } |
3023 | } | |
3024 | ||
9fe67149 EW |
3025 | trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, |
3026 | path->p_hdr->eh_entries); | |
d8990240 | 3027 | |
0756b908 | 3028 | /* |
9fe67149 EW |
3029 | * if there's a partial cluster and we have removed the first extent |
3030 | * in the file, then we also free the partial cluster |
0756b908 | 3031 | */ |
9fe67149 EW |
3032 | if (partial.state == tofree && err == 0) { |
3033 | int flags = get_default_free_blocks_flags(inode); | |
3034 | ||
3035 | if (ext4_is_pending(inode, partial.lblk)) | |
3036 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
7b415bf6 | 3037 | ext4_free_blocks(handle, inode, NULL, |
9fe67149 EW |
3038 | EXT4_C2B(sbi, partial.pclu), |
3039 | sbi->s_cluster_ratio, flags); | |
3040 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) | |
3041 | ext4_rereserve_cluster(inode, partial.lblk); | |
3042 | partial.state = initial; | |
7b415bf6 AK |
3043 | } |
3044 | ||
a86c6181 AT |
3045 | /* TODO: flexible tree reduction should be here */ |
3046 | if (path->p_hdr->eh_entries == 0) { | |
3047 | /* | |
d0d856e8 RD |
3048 | * truncate to zero freed the whole tree, |
3049 | * so we need to correct eh_depth | |
a86c6181 AT |
3050 | */ |
3051 | err = ext4_ext_get_access(handle, inode, path); | |
3052 | if (err == 0) { | |
3053 | ext_inode_hdr(inode)->eh_depth = 0; | |
3054 | ext_inode_hdr(inode)->eh_max = | |
55ad63bf | 3055 | cpu_to_le16(ext4_ext_space_root(inode, 0)); |
a86c6181 AT |
3056 | err = ext4_ext_dirty(handle, inode, path); |
3057 | } | |
3058 | } | |
3059 | out: | |
b7ea89ad TT |
3060 | ext4_ext_drop_refs(path); |
3061 | kfree(path); | |
3062 | path = NULL; | |
dfe50809 TT |
3063 | if (err == -EAGAIN) |
3064 | goto again; | |
a86c6181 AT |
3065 | ext4_journal_stop(handle); |
3066 | ||
3067 | return err; | |
3068 | } | |
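
As an illustrative sketch (not part of this file; the file path is hypothetical): one userspace path that reaches ext4_ext_remove_space() is punching a hole with fallocate(), which removes the extents covering the punched range, splitting the boundary extents first as the function above describes.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	/* hypothetical test file on an ext4 filesystem */
	int fd = open("/mnt/ext4/testfile", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* punch a 1 MiB hole at offset 4 KiB; the covering extents are
	 * removed (and split at the edges if needed) by the code above */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
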
3069 | ||
3070 | /* | |
3071 | * called at mount time | |
3072 | */ | |
3073 | void ext4_ext_init(struct super_block *sb) | |
3074 | { | |
3075 | /* | |
3076 | * possible initialization would be here | |
3077 | */ | |
3078 | ||
e2b911c5 | 3079 | if (ext4_has_feature_extents(sb)) { |
90576c0b | 3080 | #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) |
92b97816 | 3081 | printk(KERN_INFO "EXT4-fs: file extents enabled" |
bbf2f9fb | 3082 | #ifdef AGGRESSIVE_TEST |
92b97816 | 3083 | ", aggressive tests" |
a86c6181 AT |
3084 | #endif |
3085 | #ifdef CHECK_BINSEARCH | |
92b97816 | 3086 | ", check binsearch" |
a86c6181 AT |
3087 | #endif |
3088 | #ifdef EXTENTS_STATS | |
92b97816 | 3089 | ", stats" |
a86c6181 | 3090 | #endif |
92b97816 | 3091 | "\n"); |
90576c0b | 3092 | #endif |
a86c6181 AT |
3093 | #ifdef EXTENTS_STATS |
3094 | spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); | |
3095 | EXT4_SB(sb)->s_ext_min = 1 << 30; | |
3096 | EXT4_SB(sb)->s_ext_max = 0; | |
3097 | #endif | |
3098 | } | |
3099 | } | |
3100 | ||
3101 | /* | |
3102 | * called at umount time | |
3103 | */ | |
3104 | void ext4_ext_release(struct super_block *sb) | |
3105 | { | |
e2b911c5 | 3106 | if (!ext4_has_feature_extents(sb)) |
a86c6181 AT |
3107 | return; |
3108 | ||
3109 | #ifdef EXTENTS_STATS | |
3110 | if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { | |
3111 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
3112 | printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", | |
3113 | sbi->s_ext_blocks, sbi->s_ext_extents, | |
3114 | sbi->s_ext_blocks / sbi->s_ext_extents); | |
3115 | printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", | |
3116 | sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); | |
3117 | } | |
3118 | #endif | |
3119 | } | |
3120 | ||
d7b2a00c ZL |
3121 | static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) |
3122 | { | |
3123 | ext4_lblk_t ee_block; | |
3124 | ext4_fsblk_t ee_pblock; | |
3125 | unsigned int ee_len; | |
3126 | ||
3127 | ee_block = le32_to_cpu(ex->ee_block); | |
3128 | ee_len = ext4_ext_get_actual_len(ex); | |
3129 | ee_pblock = ext4_ext_pblock(ex); | |
3130 | ||
3131 | if (ee_len == 0) | |
3132 | return 0; | |
3133 | ||
3134 | return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, | |
3135 | EXTENT_STATUS_WRITTEN); | |
3136 | } | |
3137 | ||
093a088b AK |
3138 | /* FIXME!! we need to try to merge to left or right after zero-out */ |
3139 | static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |
3140 | { | |
2407518d LC |
3141 | ext4_fsblk_t ee_pblock; |
3142 | unsigned int ee_len; | |
093a088b | 3143 | |
093a088b | 3144 | ee_len = ext4_ext_get_actual_len(ex); |
bf89d16f | 3145 | ee_pblock = ext4_ext_pblock(ex); |
53085fac JK |
3146 | return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock, |
3147 | ee_len); | |
093a088b AK |
3148 | } |
3149 | ||
47ea3bb5 YY |
3150 | /* |
3151 | * ext4_split_extent_at() splits an extent at given block. | |
3152 | * | |
3153 | * @handle: the journal handle | |
3154 | * @inode: the file inode | |
3155 | * @path: the path to the extent | |
3156 | * @split: the logical block where the extent is split. |
3157 | * @split_flag: indicates whether the extent can be zeroed out if the split |
556615dc | 3158 | * fails, and the states (initialized or unwritten) of the new extents. |
47ea3bb5 YY |
3159 | * @flags: flags used to insert new extent to extent tree. |
3160 | * | |
3161 | * | |
3162 | * Splits extent [a, b] into two extents [a, @split) and [@split, b], states | |
e4d7f2d3 | 3163 | * of which are determined by split_flag. |
47ea3bb5 YY |
3164 | * |
3165 | * There are two cases: | |
3166 | * a> the extent is split into two extents. |
3167 | * b> no split is needed, and the extent is just marked. |
3168 | * | |
3169 | * return 0 on success. | |
3170 | */ | |
3171 | static int ext4_split_extent_at(handle_t *handle, | |
3172 | struct inode *inode, | |
dfe50809 | 3173 | struct ext4_ext_path **ppath, |
47ea3bb5 YY |
3174 | ext4_lblk_t split, |
3175 | int split_flag, | |
3176 | int flags) | |
3177 | { | |
dfe50809 | 3178 | struct ext4_ext_path *path = *ppath; |
47ea3bb5 YY |
3179 | ext4_fsblk_t newblock; |
3180 | ext4_lblk_t ee_block; | |
adb23551 | 3181 | struct ext4_extent *ex, newex, orig_ex, zero_ex; |
47ea3bb5 YY |
3182 | struct ext4_extent *ex2 = NULL; |
3183 | unsigned int ee_len, depth; | |
3184 | int err = 0; | |
3185 | ||
dee1f973 DM |
3186 | BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == |
3187 | (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); | |
3188 | ||
70aa1554 | 3189 | ext_debug(inode, "logical block %llu\n", (unsigned long long)split); |
47ea3bb5 YY |
3190 | |
3191 | ext4_ext_show_leaf(inode, path); | |
3192 | ||
3193 | depth = ext_depth(inode); | |
3194 | ex = path[depth].p_ext; | |
3195 | ee_block = le32_to_cpu(ex->ee_block); | |
3196 | ee_len = ext4_ext_get_actual_len(ex); | |
3197 | newblock = split - ee_block + ext4_ext_pblock(ex); | |
3198 | ||
3199 | BUG_ON(split < ee_block || split >= (ee_block + ee_len)); | |
556615dc | 3200 | BUG_ON(!ext4_ext_is_unwritten(ex) && |
357b66fd | 3201 | split_flag & (EXT4_EXT_MAY_ZEROOUT | |
556615dc LC |
3202 | EXT4_EXT_MARK_UNWRIT1 | |
3203 | EXT4_EXT_MARK_UNWRIT2)); | |
47ea3bb5 YY |
3204 | |
3205 | err = ext4_ext_get_access(handle, inode, path + depth); | |
3206 | if (err) | |
3207 | goto out; | |
3208 | ||
3209 | if (split == ee_block) { | |
3210 | /* | |
3211 | * case b: block @split is the block that the extent begins with | |
3212 | * then we just change the state of the extent, and splitting | |
3213 | * is not needed. | |
3214 | */ | |
556615dc LC |
3215 | if (split_flag & EXT4_EXT_MARK_UNWRIT2) |
3216 | ext4_ext_mark_unwritten(ex); | |
47ea3bb5 YY |
3217 | else |
3218 | ext4_ext_mark_initialized(ex); | |
3219 | ||
3220 | if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) | |
ecb94f5f | 3221 | ext4_ext_try_to_merge(handle, inode, path, ex); |
47ea3bb5 | 3222 | |
ecb94f5f | 3223 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
47ea3bb5 YY |
3224 | goto out; |
3225 | } | |
3226 | ||
3227 | /* case a */ | |
3228 | memcpy(&orig_ex, ex, sizeof(orig_ex)); | |
3229 | ex->ee_len = cpu_to_le16(split - ee_block); | |
556615dc LC |
3230 | if (split_flag & EXT4_EXT_MARK_UNWRIT1) |
3231 | ext4_ext_mark_unwritten(ex); | |
47ea3bb5 YY |
3232 | |
3233 | /* | |
3234 | * the path may lead to a new leaf, not to the original leaf any more, |
3235 | * after ext4_ext_insert_extent() returns. |
3236 | */ | |
3237 | err = ext4_ext_dirty(handle, inode, path + depth); | |
3238 | if (err) | |
3239 | goto fix_extent_len; | |
3240 | ||
3241 | ex2 = &newex; | |
3242 | ex2->ee_block = cpu_to_le32(split); | |
3243 | ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); | |
3244 | ext4_ext_store_pblock(ex2, newblock); | |
556615dc LC |
3245 | if (split_flag & EXT4_EXT_MARK_UNWRIT2) |
3246 | ext4_ext_mark_unwritten(ex2); | |
47ea3bb5 | 3247 | |
dfe50809 | 3248 | err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); |
082cd4ec YB |
3249 | if (err != -ENOSPC && err != -EDQUOT) |
3250 | goto out; | |
3251 | ||
3252 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) { | |
dee1f973 | 3253 | if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { |
adb23551 | 3254 | if (split_flag & EXT4_EXT_DATA_VALID1) { |
dee1f973 | 3255 | err = ext4_ext_zeroout(inode, ex2); |
adb23551 | 3256 | zero_ex.ee_block = ex2->ee_block; |
8cde7ad1 ZL |
3257 | zero_ex.ee_len = cpu_to_le16( |
3258 | ext4_ext_get_actual_len(ex2)); | |
adb23551 ZL |
3259 | ext4_ext_store_pblock(&zero_ex, |
3260 | ext4_ext_pblock(ex2)); | |
3261 | } else { | |
dee1f973 | 3262 | err = ext4_ext_zeroout(inode, ex); |
adb23551 | 3263 | zero_ex.ee_block = ex->ee_block; |
8cde7ad1 ZL |
3264 | zero_ex.ee_len = cpu_to_le16( |
3265 | ext4_ext_get_actual_len(ex)); | |
adb23551 ZL |
3266 | ext4_ext_store_pblock(&zero_ex, |
3267 | ext4_ext_pblock(ex)); | |
3268 | } | |
3269 | } else { | |
dee1f973 | 3270 | err = ext4_ext_zeroout(inode, &orig_ex); |
adb23551 | 3271 | zero_ex.ee_block = orig_ex.ee_block; |
8cde7ad1 ZL |
3272 | zero_ex.ee_len = cpu_to_le16( |
3273 | ext4_ext_get_actual_len(&orig_ex)); | |
adb23551 ZL |
3274 | ext4_ext_store_pblock(&zero_ex, |
3275 | ext4_ext_pblock(&orig_ex)); | |
3276 | } | |
dee1f973 | 3277 | |
082cd4ec YB |
3278 | if (!err) { |
3279 | /* update the extent length and mark as initialized */ | |
3280 | ex->ee_len = cpu_to_le16(ee_len); | |
3281 | ext4_ext_try_to_merge(handle, inode, path, ex); | |
3282 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); | |
3283 | if (!err) | |
3284 | /* update extent status tree */ | |
3285 | err = ext4_zeroout_es(inode, &zero_ex); | |
3286 | /* If we failed at this point, we don't know in which | |
3287 | * state the extent tree exactly is so don't try to fix | |
3288 | * length of the original extent as it may do even more | |
3289 | * damage. | |
3290 | */ | |
3291 | goto out; | |
3292 | } | |
3293 | } | |
47ea3bb5 YY |
3294 | |
3295 | fix_extent_len: | |
3296 | ex->ee_len = orig_ex.ee_len; | |
b60ca334 HS |
3297 | /* |
3298 | * Ignore ext4_ext_dirty return value since we are already in error path | |
3299 | * and err is a non-zero error code. | |
3300 | */ | |
29faed16 | 3301 | ext4_ext_dirty(handle, inode, path + path->p_depth); |
47ea3bb5 | 3302 | return err; |
082cd4ec YB |
3303 | out: |
3304 | ext4_ext_show_leaf(inode, path); | |
3305 | return err; | |
47ea3bb5 YY |
3306 | } |
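
A hedged sketch of the "case a" arithmetic above (illustrative struct, not the kernel's types): the left piece keeps the original start, the right piece starts at @split, and its physical start is shifted by the same delta used to compute 'newblock' in the function.

struct ext_sketch {
	unsigned int lblk;		/* first logical block */
	unsigned int len;		/* number of blocks */
	unsigned long long pblk;	/* first physical block */
};

/* split ex at 'split', with ex.lblk < split < ex.lblk + ex.len */
static void split_at(struct ext_sketch ex, unsigned int split,
		     struct ext_sketch *left, struct ext_sketch *right)
{
	unsigned int delta = split - ex.lblk;

	left->lblk = ex.lblk;
	left->len = delta;
	left->pblk = ex.pblk;

	right->lblk = split;
	right->len = ex.len - delta;
	right->pblk = ex.pblk + delta;	/* mirrors 'newblock' above */
}
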
3307 | ||
3308 | /* | |
3309 | * ext4_split_extent() splits an extent and marks the extent covered |
3310 | * by @map as split_flag indicates |
3311 | * | |
70261f56 | 3312 | * It may result in splitting the extent into multiple extents (up to three) |
47ea3bb5 YY |
3313 | * There are three possibilities: |
3314 | * a> There is no split required | |
3315 | * b> Splits in two extents: Split is happening at either end of the extent | |
3316 | * c> Splits in three extents: Someone is splitting in the middle of the extent |
3317 | * | |
3318 | */ | |
3319 | static int ext4_split_extent(handle_t *handle, | |
3320 | struct inode *inode, | |
dfe50809 | 3321 | struct ext4_ext_path **ppath, |
47ea3bb5 YY |
3322 | struct ext4_map_blocks *map, |
3323 | int split_flag, | |
3324 | int flags) | |
3325 | { | |
dfe50809 | 3326 | struct ext4_ext_path *path = *ppath; |
47ea3bb5 YY |
3327 | ext4_lblk_t ee_block; |
3328 | struct ext4_extent *ex; | |
3329 | unsigned int ee_len, depth; | |
3330 | int err = 0; | |
556615dc | 3331 | int unwritten; |
47ea3bb5 | 3332 | int split_flag1, flags1; |
3a225670 | 3333 | int allocated = map->m_len; |
47ea3bb5 YY |
3334 | |
3335 | depth = ext_depth(inode); | |
3336 | ex = path[depth].p_ext; | |
3337 | ee_block = le32_to_cpu(ex->ee_block); | |
3338 | ee_len = ext4_ext_get_actual_len(ex); | |
556615dc | 3339 | unwritten = ext4_ext_is_unwritten(ex); |
47ea3bb5 YY |
3340 | |
3341 | if (map->m_lblk + map->m_len < ee_block + ee_len) { | |
dee1f973 | 3342 | split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; |
47ea3bb5 | 3343 | flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; |
556615dc LC |
3344 | if (unwritten) |
3345 | split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | | |
3346 | EXT4_EXT_MARK_UNWRIT2; | |
dee1f973 DM |
3347 | if (split_flag & EXT4_EXT_DATA_VALID2) |
3348 | split_flag1 |= EXT4_EXT_DATA_VALID1; | |
dfe50809 | 3349 | err = ext4_split_extent_at(handle, inode, ppath, |
47ea3bb5 | 3350 | map->m_lblk + map->m_len, split_flag1, flags1); |
93917411 YY |
3351 | if (err) |
3352 | goto out; | |
3a225670 ZL |
3353 | } else { |
3354 | allocated = ee_len - (map->m_lblk - ee_block); | |
47ea3bb5 | 3355 | } |
357b66fd DM |
3356 | /* |
3357 | * Update path is required because previous ext4_split_extent_at() may | |
3358 | * result in split of original leaf or extent zeroout. | |
3359 | */ | |
73c384c0 | 3360 | path = ext4_find_extent(inode, map->m_lblk, ppath, flags); |
47ea3bb5 YY |
3361 | if (IS_ERR(path)) |
3362 | return PTR_ERR(path); | |
357b66fd DM |
3363 | depth = ext_depth(inode); |
3364 | ex = path[depth].p_ext; | |
a18ed359 DM |
3365 | if (!ex) { |
3366 | EXT4_ERROR_INODE(inode, "unexpected hole at %lu", | |
3367 | (unsigned long) map->m_lblk); | |
6a797d27 | 3368 | return -EFSCORRUPTED; |
a18ed359 | 3369 | } |
556615dc | 3370 | unwritten = ext4_ext_is_unwritten(ex); |
357b66fd | 3371 | split_flag1 = 0; |
47ea3bb5 YY |
3372 | |
3373 | if (map->m_lblk >= ee_block) { | |
357b66fd | 3374 | split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; |
556615dc LC |
3375 | if (unwritten) { |
3376 | split_flag1 |= EXT4_EXT_MARK_UNWRIT1; | |
357b66fd | 3377 | split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | |
556615dc | 3378 | EXT4_EXT_MARK_UNWRIT2); |
357b66fd | 3379 | } |
dfe50809 | 3380 | err = ext4_split_extent_at(handle, inode, ppath, |
47ea3bb5 YY |
3381 | map->m_lblk, split_flag1, flags); |
3382 | if (err) | |
3383 | goto out; | |
3384 | } | |
3385 | ||
3386 | ext4_ext_show_leaf(inode, path); | |
3387 | out: | |
3a225670 | 3388 | return err ? err : allocated; |
47ea3bb5 YY |
3389 | } |
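
A hedged sketch of the case analysis (illustrative helper, not kernel code): the function first splits at the mapped region's end when the region ends inside the extent, then at its start, so a region strictly inside the extent yields three pieces.

/* how many extents [ee_block, ee_block + ee_len) is cut into by a
 * mapped region [m_lblk, m_lblk + m_len) contained within it */
static int split_count(unsigned int ee_block, unsigned int ee_len,
		       unsigned int m_lblk, unsigned int m_len)
{
	int pieces = 1;

	if (m_lblk + m_len < ee_block + ee_len)
		pieces++;	/* split at the end of the region */
	if (m_lblk > ee_block)
		pieces++;	/* split at the start of the region */
	return pieces;		/* 1, 2, or 3: cases a>, b>, c> */
}
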
3390 | ||
56055d3a | 3391 | /* |
e35fd660 | 3392 | * This function is called by ext4_ext_map_blocks() if someone tries to write |
556615dc | 3393 | * to an unwritten extent. It may result in splitting the unwritten |
25985edc | 3394 | * extent into multiple extents (up to three - one initialized and two |
556615dc | 3395 | * unwritten). |
56055d3a AA |
3396 | * There are three possibilities: |
3397 | * a> There is no split required: Entire extent should be initialized | |
3398 | * b> Splits in two extents: Write is happening at either end of the extent | |
3399 | * c> Splits in three extents: Someone is writing in the middle of the extent |
6f91bc5f EG |
3400 | * |
3401 | * Pre-conditions: | |
556615dc | 3402 | * - The extent pointed to by 'path' is unwritten. |
6f91bc5f EG |
3403 | * - The extent pointed to by 'path' contains a superset |
3404 | * of the logical span [map->m_lblk, map->m_lblk + map->m_len). | |
3405 | * | |
3406 | * Post-conditions on success: | |
3407 | * - the returned value is the number of blocks beyond map->m_lblk |
3408 | * that are allocated and initialized. | |
3409 | * It is guaranteed to be >= map->m_len. | |
56055d3a | 3410 | */ |
725d26d3 | 3411 | static int ext4_ext_convert_to_initialized(handle_t *handle, |
e35fd660 TT |
3412 | struct inode *inode, |
3413 | struct ext4_map_blocks *map, | |
dfe50809 | 3414 | struct ext4_ext_path **ppath, |
27dd4385 | 3415 | int flags) |
56055d3a | 3416 | { |
dfe50809 | 3417 | struct ext4_ext_path *path = *ppath; |
67a5da56 | 3418 | struct ext4_sb_info *sbi; |
6f91bc5f | 3419 | struct ext4_extent_header *eh; |
667eff35 | 3420 | struct ext4_map_blocks split_map; |
4f8caa60 | 3421 | struct ext4_extent zero_ex1, zero_ex2; |
bc2d9db4 | 3422 | struct ext4_extent *ex, *abut_ex; |
21ca087a | 3423 | ext4_lblk_t ee_block, eof_block; |
bc2d9db4 LC |
3424 | unsigned int ee_len, depth, map_len = map->m_len; |
3425 | int allocated = 0, max_zeroout = 0; | |
56055d3a | 3426 | int err = 0; |
4f8caa60 | 3427 | int split_flag = EXT4_EXT_DATA_VALID2; |
21ca087a | 3428 | |
70aa1554 RH |
3429 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
3430 | (unsigned long long)map->m_lblk, map_len); | |
21ca087a | 3431 | |
67a5da56 | 3432 | sbi = EXT4_SB(inode->i_sb); |
801674f3 JK |
3433 | eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) |
3434 | >> inode->i_sb->s_blocksize_bits; | |
bc2d9db4 LC |
3435 | if (eof_block < map->m_lblk + map_len) |
3436 | eof_block = map->m_lblk + map_len; | |
56055d3a AA |
3437 | |
3438 | depth = ext_depth(inode); | |
6f91bc5f | 3439 | eh = path[depth].p_hdr; |
56055d3a AA |
3440 | ex = path[depth].p_ext; |
3441 | ee_block = le32_to_cpu(ex->ee_block); | |
3442 | ee_len = ext4_ext_get_actual_len(ex); | |
4f8caa60 JK |
3443 | zero_ex1.ee_len = 0; |
3444 | zero_ex2.ee_len = 0; | |
56055d3a | 3445 | |
6f91bc5f EG |
3446 | trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); |
3447 | ||
3448 | /* Pre-conditions */ | |
556615dc | 3449 | BUG_ON(!ext4_ext_is_unwritten(ex)); |
6f91bc5f | 3450 | BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); |
6f91bc5f EG |
3451 | |
3452 | /* | |
3453 | * Attempt to transfer newly initialized blocks from the currently | |
556615dc | 3454 | * unwritten extent to its neighbor. This is much cheaper |
6f91bc5f | 3455 | * than an insertion followed by a merge as those involve costly |
bc2d9db4 LC |
3456 | * memmove() calls. Transferring to the left is the common case in |
3457 | * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) | |
3458 | * followed by append writes. | |
6f91bc5f EG |
3459 | * |
3460 | * Limitations of the current logic: | |
bc2d9db4 | 3461 | * - L1: we do not deal with writes covering the whole extent. |
6f91bc5f EG |
3462 | * This would require removing the extent if the transfer |
3463 | * is possible. | |
bc2d9db4 | 3464 | * - L2: we only attempt to merge with an extent stored in the |
6f91bc5f EG |
3465 | * same extent tree node. |
3466 | */ | |
bc2d9db4 LC |
3467 | if ((map->m_lblk == ee_block) && |
3468 | /* See if we can merge left */ | |
3469 | (map_len < ee_len) && /*L1*/ | |
3470 | (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ | |
6f91bc5f EG |
3471 | ext4_lblk_t prev_lblk; |
3472 | ext4_fsblk_t prev_pblk, ee_pblk; | |
bc2d9db4 | 3473 | unsigned int prev_len; |
6f91bc5f | 3474 | |
bc2d9db4 LC |
3475 | abut_ex = ex - 1; |
3476 | prev_lblk = le32_to_cpu(abut_ex->ee_block); | |
3477 | prev_len = ext4_ext_get_actual_len(abut_ex); | |
3478 | prev_pblk = ext4_ext_pblock(abut_ex); | |
6f91bc5f | 3479 | ee_pblk = ext4_ext_pblock(ex); |
6f91bc5f EG |
3480 | |
3481 | /* | |
bc2d9db4 | 3482 | * A transfer of blocks from 'ex' to 'abut_ex' is allowed |
6f91bc5f | 3483 | * upon those conditions: |
bc2d9db4 LC |
3484 | * - C1: abut_ex is initialized, |
3485 | * - C2: abut_ex is logically abutting ex, | |
3486 | * - C3: abut_ex is physically abutting ex, | |
3487 | * - C4: abut_ex can receive the additional blocks without | |
6f91bc5f EG |
3488 | * overflowing the (initialized) length limit. |
3489 | */ | |
556615dc | 3490 | if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ |
6f91bc5f EG |
3491 | ((prev_lblk + prev_len) == ee_block) && /*C2*/ |
3492 | ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ | |
bc2d9db4 | 3493 | (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ |
6f91bc5f EG |
3494 | err = ext4_ext_get_access(handle, inode, path + depth); |
3495 | if (err) | |
3496 | goto out; | |
3497 | ||
3498 | trace_ext4_ext_convert_to_initialized_fastpath(inode, | |
bc2d9db4 | 3499 | map, ex, abut_ex); |
6f91bc5f | 3500 | |
bc2d9db4 LC |
3501 | /* Shift the start of ex by 'map_len' blocks */ |
3502 | ex->ee_block = cpu_to_le32(ee_block + map_len); | |
3503 | ext4_ext_store_pblock(ex, ee_pblk + map_len); | |
3504 | ex->ee_len = cpu_to_le16(ee_len - map_len); | |
556615dc | 3505 | ext4_ext_mark_unwritten(ex); /* Restore the flag */ |
6f91bc5f | 3506 | |
bc2d9db4 LC |
3507 | /* Extend abut_ex by 'map_len' blocks */ |
3508 | abut_ex->ee_len = cpu_to_le16(prev_len + map_len); | |
6f91bc5f | 3509 | |
bc2d9db4 LC |
3510 | /* Result: number of initialized blocks past m_lblk */ |
3511 | allocated = map_len; | |
3512 | } | |
3513 | } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && | |
3514 | (map_len < ee_len) && /*L1*/ | |
3515 | ex < EXT_LAST_EXTENT(eh)) { /*L2*/ | |
3516 | /* See if we can merge right */ | |
3517 | ext4_lblk_t next_lblk; | |
3518 | ext4_fsblk_t next_pblk, ee_pblk; | |
3519 | unsigned int next_len; | |
3520 | ||
3521 | abut_ex = ex + 1; | |
3522 | next_lblk = le32_to_cpu(abut_ex->ee_block); | |
3523 | next_len = ext4_ext_get_actual_len(abut_ex); | |
3524 | next_pblk = ext4_ext_pblock(abut_ex); | |
3525 | ee_pblk = ext4_ext_pblock(ex); | |
6f91bc5f | 3526 | |
bc2d9db4 LC |
3527 | /* |
3528 | * A transfer of blocks from 'ex' to 'abut_ex' is allowed | |
3529 | * upon those conditions: | |
3530 | * - C1: abut_ex is initialized, | |
3531 | * - C2: abut_ex is logically abutting ex, | |
3532 | * - C3: abut_ex is physically abutting ex, | |
3533 | * - C4: abut_ex can receive the additional blocks without | |
3534 | * overflowing the (initialized) length limit. | |
3535 | */ | |
556615dc | 3536 | if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ |
bc2d9db4 LC |
3537 | ((map->m_lblk + map_len) == next_lblk) && /*C2*/ |
3538 | ((ee_pblk + ee_len) == next_pblk) && /*C3*/ | |
3539 | (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ | |
3540 | err = ext4_ext_get_access(handle, inode, path + depth); | |
3541 | if (err) | |
3542 | goto out; | |
3543 | ||
3544 | trace_ext4_ext_convert_to_initialized_fastpath(inode, | |
3545 | map, ex, abut_ex); | |
3546 | ||
3547 | /* Shift the start of abut_ex by 'map_len' blocks */ | |
3548 | abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); | |
3549 | ext4_ext_store_pblock(abut_ex, next_pblk - map_len); | |
3550 | ex->ee_len = cpu_to_le16(ee_len - map_len); | |
556615dc | 3551 | ext4_ext_mark_unwritten(ex); /* Restore the flag */ |
bc2d9db4 LC |
3552 | |
3553 | /* Extend abut_ex by 'map_len' blocks */ | |
3554 | abut_ex->ee_len = cpu_to_le16(next_len + map_len); | |
6f91bc5f EG |
3555 | |
3556 | /* Result: number of initialized blocks past m_lblk */ | |
bc2d9db4 | 3557 | allocated = map_len; |
6f91bc5f EG |
3558 | } |
3559 | } | |
bc2d9db4 LC |
3560 | if (allocated) { |
3561 | /* Mark the block containing both extents as dirty */ | |
b60ca334 | 3562 | err = ext4_ext_dirty(handle, inode, path + depth); |
bc2d9db4 LC |
3563 | |
3564 | /* Update path to point to the right extent */ | |
3565 | path[depth].p_ext = abut_ex; | |
3566 | goto out; | |
3567 | } else | |
3568 | allocated = ee_len - (map->m_lblk - ee_block); | |
6f91bc5f | 3569 | |
667eff35 | 3570 | WARN_ON(map->m_lblk < ee_block); |
21ca087a DM |
3571 | /* |
3572 | * It is safe to convert extent to initialized via explicit | |
9e740568 | 3573 | * zeroout only if extent is fully inside i_size or new_size. |
21ca087a | 3574 | */ |
667eff35 | 3575 | split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; |
21ca087a | 3576 | |
67a5da56 ZL |
3577 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) |
3578 | max_zeroout = sbi->s_extent_max_zeroout_kb >> | |
4f42f80a | 3579 | (inode->i_sb->s_blocksize_bits - 10); |
67a5da56 | 3580 | |
56055d3a | 3581 | /* |
4f8caa60 | 3582 | * five cases: |
667eff35 | 3583 | * 1. split the extent into three extents. |
4f8caa60 JK |
3584 | * 2. split the extent into two extents, zeroout the head of the first |
3585 | * extent. | |
3586 | * 3. split the extent into two extents, zeroout the tail of the second | |
3587 | * extent. | |
667eff35 | 3588 | * 4. split the extent into two extents without zeroout. |
4f8caa60 JK |
3589 | * 5. no splitting needed, just possibly zeroout the head and / or the |
3590 | * tail of the extent. | |
56055d3a | 3591 | */ |
667eff35 YY |
3592 | split_map.m_lblk = map->m_lblk; |
3593 | split_map.m_len = map->m_len; | |
3594 | ||
4f8caa60 | 3595 | if (max_zeroout && (allocated > split_map.m_len)) { |
67a5da56 | 3596 | if (allocated <= max_zeroout) { |
4f8caa60 JK |
3597 | /* case 3 or 5 */ |
3598 | zero_ex1.ee_block = | |
3599 | cpu_to_le32(split_map.m_lblk + | |
3600 | split_map.m_len); | |
3601 | zero_ex1.ee_len = | |
3602 | cpu_to_le16(allocated - split_map.m_len); | |
3603 | ext4_ext_store_pblock(&zero_ex1, | |
3604 | ext4_ext_pblock(ex) + split_map.m_lblk + | |
3605 | split_map.m_len - ee_block); | |
3606 | err = ext4_ext_zeroout(inode, &zero_ex1); | |
56055d3a | 3607 | if (err) |
308c57cc | 3608 | goto fallback; |
667eff35 | 3609 | split_map.m_len = allocated; |
4f8caa60 JK |
3610 | } |
3611 | if (split_map.m_lblk - ee_block + split_map.m_len < | |
3612 | max_zeroout) { | |
3613 | /* case 2 or 5 */ | |
3614 | if (split_map.m_lblk != ee_block) { | |
3615 | zero_ex2.ee_block = ex->ee_block; | |
3616 | zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - | |
667eff35 | 3617 | ee_block); |
4f8caa60 | 3618 | ext4_ext_store_pblock(&zero_ex2, |
667eff35 | 3619 | ext4_ext_pblock(ex)); |
4f8caa60 | 3620 | err = ext4_ext_zeroout(inode, &zero_ex2); |
667eff35 | 3621 | if (err) |
308c57cc | 3622 | goto fallback; |
667eff35 YY |
3623 | } |
3624 | ||
4f8caa60 | 3625 | split_map.m_len += split_map.m_lblk - ee_block; |
667eff35 | 3626 | split_map.m_lblk = ee_block; |
9b940f8e | 3627 | allocated = map->m_len; |
56055d3a AA |
3628 | } |
3629 | } | |
667eff35 | 3630 | |
308c57cc | 3631 | fallback: |
ae9e9c6a JK |
3632 | err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, |
3633 | flags); | |
3634 | if (err > 0) | |
3635 | err = 0; | |
56055d3a | 3636 | out: |
adb23551 | 3637 | /* If we have gotten a failure, don't zero out status tree */ |
4f8caa60 JK |
3638 | if (!err) { |
3639 | err = ext4_zeroout_es(inode, &zero_ex1); | |
3640 | if (!err) | |
3641 | err = ext4_zeroout_es(inode, &zero_ex2); | |
3642 | } | |
56055d3a AA |
3643 | return err ? err : allocated; |
3644 | } | |
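
An illustrative userspace trigger for this conversion (file path hypothetical): preallocating with FALLOC_FL_KEEP_SIZE leaves an unwritten extent, and a later write into its middle is the three-way "case c" handled above at writeback time.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	char buf[4096];
	int fd = open("/mnt/ext4/prealloc", O_CREAT | O_WRONLY, 0644);

	if (fd < 0)
		return 1;
	/* 8 MiB preallocation: one (or more) unwritten extents */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 8 << 20) < 0)
		return 1;
	memset(buf, 'x', sizeof(buf));
	/* write in the middle: the unwritten extent is split (up to
	 * three pieces) and the written part converted to initialized */
	pwrite(fd, buf, sizeof(buf), 4 << 20);
	close(fd);
	return 0;
}
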
3645 | ||
0031462b | 3646 | /* |
e35fd660 | 3647 | * This function is called by ext4_ext_map_blocks() from |
0031462b | 3648 | * ext4_get_blocks_dio_write() when DIO is used to write |
556615dc | 3649 | * to an unwritten extent. |
0031462b | 3650 | * |
556615dc LC |
3651 | * Writing to an unwritten extent may result in splitting the unwritten |
3652 | * extent into multiple initialized/unwritten extents (up to three) | |
0031462b | 3653 | * There are three possibilities: |
556615dc | 3654 | * a> There is no split required: Entire extent should be unwritten |
0031462b MC |
3655 | * b> Splits in two extents: Write is happening at either end of the extent |
3656 | * c> Splits in three extents: Someone is writing in the middle of the extent |
3657 | * | |
b8a86845 LC |
3658 | * This works the same way in the case of initialized -> unwritten conversion. |
3659 | * | |
0031462b | 3660 | * One or more index blocks may be needed if the extent tree grows after |
556615dc LC |
3661 | * the unwritten extent split. To prevent ENOSPC from occurring at I/O |
3662 | * completion, we need to split the unwritten extent before DIO submits |
3663 | * the I/O. The unwritten extent passed in will be split into (at most) |
3664 | * three unwritten extents. After the I/O completes, the part being |
0031462b MC |
3665 | * filled will be converted to initialized by the end_io callback |
3666 | * via ext4_convert_unwritten_extents(). |
ba230c3f | 3667 | * |
556615dc | 3668 | * Returns the size of the unwritten extent to be written on success. |
0031462b | 3669 | */ |
b8a86845 | 3670 | static int ext4_split_convert_extents(handle_t *handle, |
0031462b | 3671 | struct inode *inode, |
e35fd660 | 3672 | struct ext4_map_blocks *map, |
dfe50809 | 3673 | struct ext4_ext_path **ppath, |
0031462b MC |
3674 | int flags) |
3675 | { | |
dfe50809 | 3676 | struct ext4_ext_path *path = *ppath; |
667eff35 YY |
3677 | ext4_lblk_t eof_block; |
3678 | ext4_lblk_t ee_block; | |
3679 | struct ext4_extent *ex; | |
3680 | unsigned int ee_len; | |
3681 | int split_flag = 0, depth; | |
21ca087a | 3682 | |
70aa1554 | 3683 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
b8a86845 | 3684 | (unsigned long long)map->m_lblk, map->m_len); |
21ca087a | 3685 | |
801674f3 JK |
3686 | eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) |
3687 | >> inode->i_sb->s_blocksize_bits; | |
e35fd660 TT |
3688 | if (eof_block < map->m_lblk + map->m_len) |
3689 | eof_block = map->m_lblk + map->m_len; | |
21ca087a DM |
3690 | /* |
3691 | * It is safe to convert extent to initialized via explicit | |
e4d7f2d3 | 3692 | * zeroout only if extent is fully inside i_size or new_size. |
21ca087a | 3693 | */ |
667eff35 YY |
3694 | depth = ext_depth(inode); |
3695 | ex = path[depth].p_ext; | |
3696 | ee_block = le32_to_cpu(ex->ee_block); | |
3697 | ee_len = ext4_ext_get_actual_len(ex); | |
0031462b | 3698 | |
b8a86845 LC |
3699 | /* Convert to unwritten */ |
3700 | if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { | |
3701 | split_flag |= EXT4_EXT_DATA_VALID1; | |
3702 | /* Convert to initialized */ | |
3703 | } else if (flags & EXT4_GET_BLOCKS_CONVERT) { | |
3704 | split_flag |= ee_block + ee_len <= eof_block ? | |
3705 | EXT4_EXT_MAY_ZEROOUT : 0; | |
556615dc | 3706 | split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); |
b8a86845 | 3707 | } |
667eff35 | 3708 | flags |= EXT4_GET_BLOCKS_PRE_IO; |
dfe50809 | 3709 | return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); |
0031462b | 3710 | } |
197217a5 | 3711 | |
c7064ef1 | 3712 | static int ext4_convert_unwritten_extents_endio(handle_t *handle, |
dee1f973 DM |
3713 | struct inode *inode, |
3714 | struct ext4_map_blocks *map, | |
dfe50809 | 3715 | struct ext4_ext_path **ppath) |
0031462b | 3716 | { |
dfe50809 | 3717 | struct ext4_ext_path *path = *ppath; |
0031462b | 3718 | struct ext4_extent *ex; |
dee1f973 DM |
3719 | ext4_lblk_t ee_block; |
3720 | unsigned int ee_len; | |
0031462b MC |
3721 | int depth; |
3722 | int err = 0; | |
0031462b MC |
3723 | |
3724 | depth = ext_depth(inode); | |
0031462b | 3725 | ex = path[depth].p_ext; |
dee1f973 DM |
3726 | ee_block = le32_to_cpu(ex->ee_block); |
3727 | ee_len = ext4_ext_get_actual_len(ex); | |
0031462b | 3728 | |
70aa1554 | 3729 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
dee1f973 DM |
3730 | (unsigned long long)ee_block, ee_len); |
3731 | ||
ff95ec22 DM |
3732 | /* If the extent is larger than requested, it is a clear sign that we |
3733 | * still have some extent state machine issues left, so an extent split |
3734 | * is still required. |
3735 | * TODO: once all related issues are fixed, this situation should be |
3736 | * illegal. |
3737 | */ | |
dee1f973 | 3738 | if (ee_block != map->m_lblk || ee_len > map->m_len) { |
e3d550c2 RP |
3739 | #ifdef CONFIG_EXT4_DEBUG |
3740 | ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," | |
8d2ae1cb | 3741 | " len %u; IO logical block %llu, len %u", |
ff95ec22 DM |
3742 | inode->i_ino, (unsigned long long)ee_block, ee_len, |
3743 | (unsigned long long)map->m_lblk, map->m_len); | |
3744 | #endif | |
dfe50809 | 3745 | err = ext4_split_convert_extents(handle, inode, map, ppath, |
b8a86845 | 3746 | EXT4_GET_BLOCKS_CONVERT); |
dee1f973 | 3747 | if (err < 0) |
dfe50809 | 3748 | return err; |
ed8a1a76 | 3749 | path = ext4_find_extent(inode, map->m_lblk, ppath, 0); |
dfe50809 TT |
3750 | if (IS_ERR(path)) |
3751 | return PTR_ERR(path); | |
dee1f973 DM |
3752 | depth = ext_depth(inode); |
3753 | ex = path[depth].p_ext; | |
3754 | } | |
197217a5 | 3755 | |
0031462b MC |
3756 | err = ext4_ext_get_access(handle, inode, path + depth); |
3757 | if (err) | |
3758 | goto out; | |
3759 | /* first mark the extent as initialized */ | |
3760 | ext4_ext_mark_initialized(ex); | |
3761 | ||
197217a5 YY |
3762 | /* note: ext4_ext_correct_indexes() isn't needed here because |
3763 | * borders are not changed | |
0031462b | 3764 | */ |
ecb94f5f | 3765 | ext4_ext_try_to_merge(handle, inode, path, ex); |
197217a5 | 3766 | |
0031462b | 3767 | /* Mark modified extent as dirty */ |
ecb94f5f | 3768 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
0031462b MC |
3769 | out: |
3770 | ext4_ext_show_leaf(inode, path); | |
3771 | return err; | |
3772 | } | |
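
An illustrative direct-I/O sketch (file path hypothetical): with O_DIRECT into preallocated space, the unwritten extent is split before submission (the PRE_IO case) and converted to written here once the I/O completes, on kernels using this end_io path.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	void *buf = NULL;
	int fd = open("/mnt/ext4/dio", O_CREAT | O_WRONLY | O_DIRECT, 0644);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
		return 1;
	/* preallocate an unwritten extent, then overwrite part of it */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		return 1;
	memset(buf, 'y', 4096);
	/* aligned DIO write; the covered range is converted to written
	 * at I/O completion */
	pwrite(fd, buf, 4096, 0);
	free(buf);
	close(fd);
	return 0;
}
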
3773 | ||
b8a86845 | 3774 | static int |
e8b83d93 TT |
3775 | convert_initialized_extent(handle_t *handle, struct inode *inode, |
3776 | struct ext4_map_blocks *map, | |
29c6eaff | 3777 | struct ext4_ext_path **ppath, |
f064a9d6 | 3778 | unsigned int *allocated) |
b8a86845 | 3779 | { |
4f224b8b | 3780 | struct ext4_ext_path *path = *ppath; |
e8b83d93 TT |
3781 | struct ext4_extent *ex; |
3782 | ext4_lblk_t ee_block; | |
3783 | unsigned int ee_len; | |
3784 | int depth; | |
b8a86845 LC |
3785 | int err = 0; |
3786 | ||
3787 | /* | |
3788 | * Make sure that the extent is no bigger than we support with | |
556615dc | 3789 | * unwritten extent |
b8a86845 | 3790 | */ |
556615dc LC |
3791 | if (map->m_len > EXT_UNWRITTEN_MAX_LEN) |
3792 | map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; | |
b8a86845 | 3793 | |
e8b83d93 TT |
3794 | depth = ext_depth(inode); |
3795 | ex = path[depth].p_ext; | |
3796 | ee_block = le32_to_cpu(ex->ee_block); | |
3797 | ee_len = ext4_ext_get_actual_len(ex); | |
3798 | ||
70aa1554 | 3799 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
e8b83d93 TT |
3800 | (unsigned long long)ee_block, ee_len); |
3801 | ||
3802 | if (ee_block != map->m_lblk || ee_len > map->m_len) { | |
dfe50809 | 3803 | err = ext4_split_convert_extents(handle, inode, map, ppath, |
e8b83d93 TT |
3804 | EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); |
3805 | if (err < 0) | |
3806 | return err; | |
ed8a1a76 | 3807 | path = ext4_find_extent(inode, map->m_lblk, ppath, 0); |
e8b83d93 TT |
3808 | if (IS_ERR(path)) |
3809 | return PTR_ERR(path); | |
3810 | depth = ext_depth(inode); | |
3811 | ex = path[depth].p_ext; | |
3812 | if (!ex) { | |
3813 | EXT4_ERROR_INODE(inode, "unexpected hole at %lu", | |
3814 | (unsigned long) map->m_lblk); | |
6a797d27 | 3815 | return -EFSCORRUPTED; |
e8b83d93 TT |
3816 | } |
3817 | } | |
3818 | ||
3819 | err = ext4_ext_get_access(handle, inode, path + depth); | |
3820 | if (err) | |
3821 | return err; | |
3822 | /* first mark the extent as unwritten */ | |
3823 | ext4_ext_mark_unwritten(ex); | |
3824 | ||
3825 | /* note: ext4_ext_correct_indexes() isn't needed here because | |
3826 | * borders are not changed | |
3827 | */ | |
3828 | ext4_ext_try_to_merge(handle, inode, path, ex); | |
3829 | ||
3830 | /* Mark modified extent as dirty */ | |
3831 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); | |
3832 | if (err) | |
3833 | return err; | |
3834 | ext4_ext_show_leaf(inode, path); | |
3835 | ||
3836 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
4337ecd1 | 3837 | |
b8a86845 | 3838 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
f064a9d6 EW |
3839 | if (*allocated > map->m_len) |
3840 | *allocated = map->m_len; | |
3841 | map->m_len = *allocated; | |
3842 | return 0; | |
b8a86845 LC |
3843 | } |
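
A hedged userspace example of a request for this initialized-to-unwritten conversion (file path hypothetical) is zero-range, which ext4 can satisfy by marking the covering extents unwritten instead of writing zeroes:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("/mnt/ext4/zr", O_WRONLY);

	if (fd < 0)
		return 1;
	/* zero 64 KiB at offset 0 without writing zeroes; reads of the
	 * range then return zeroes because the extent is unwritten */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 64 * 1024) < 0)
		return 1;
	close(fd);
	return 0;
}
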
3844 | ||
0031462b | 3845 | static int |
556615dc | 3846 | ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, |
e35fd660 | 3847 | struct ext4_map_blocks *map, |
dfe50809 | 3848 | struct ext4_ext_path **ppath, int flags, |
e35fd660 | 3849 | unsigned int allocated, ext4_fsblk_t newblock) |
0031462b | 3850 | { |
8ec2d31b | 3851 | struct ext4_ext_path __maybe_unused *path = *ppath; |
0031462b MC |
3852 | int ret = 0; |
3853 | int err = 0; | |
3854 | ||
70aa1554 RH |
3855 | ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", |
3856 | (unsigned long long)map->m_lblk, map->m_len, flags, | |
3857 | allocated); | |
0031462b MC |
3858 | ext4_ext_show_leaf(inode, path); |
3859 | ||
27dd4385 | 3860 | /* |
556615dc | 3861 | * When writing into unwritten space, we should not fail to |
27dd4385 LC |
3862 | * allocate metadata blocks for the new extent block if needed. |
3863 | */ | |
3864 | flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; | |
3865 | ||
556615dc | 3866 | trace_ext4_ext_handle_unwritten_extents(inode, map, flags, |
b5645534 | 3867 | allocated, newblock); |
d8990240 | 3868 | |
779e2651 | 3869 | /* get_block() before submitting IO, split the extent */ |
c8b459f4 | 3870 | if (flags & EXT4_GET_BLOCKS_PRE_IO) { |
dfe50809 TT |
3871 | ret = ext4_split_convert_extents(handle, inode, map, ppath, |
3872 | flags | EXT4_GET_BLOCKS_CONVERT); | |
779e2651 EW |
3873 | if (ret < 0) { |
3874 | err = ret; | |
3875 | goto out2; | |
3876 | } | |
3877 | /* | |
3878 | * shouldn't get a 0 return when splitting an extent unless | |
3879 | * m_len is 0 (bug) or extent has been corrupted | |
3880 | */ | |
3881 | if (unlikely(ret == 0)) { | |
3882 | EXT4_ERROR_INODE(inode, | |
3883 | "unexpected ret == 0, m_len = %u", | |
3884 | map->m_len); | |
3885 | err = -EFSCORRUPTED; | |
3886 | goto out2; | |
3887 | } | |
a25a4e1a | 3888 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
0031462b MC |
3889 | goto out; |
3890 | } | |
c7064ef1 | 3891 | /* IO end_io complete, convert the filled extent to written */ |
c8b459f4 | 3892 | if (flags & EXT4_GET_BLOCKS_CONVERT) { |
bee6cf00 | 3893 | err = ext4_convert_unwritten_extents_endio(handle, inode, map, |
dfe50809 | 3894 | ppath); |
bee6cf00 EW |
3895 | if (err < 0) |
3896 | goto out2; | |
3897 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
3898 | goto map_out; | |
0031462b | 3899 | } |
bee6cf00 | 3900 | /* buffered IO cases */ |
0031462b MC |
3901 | /* |
3902 | * repeat fallocate creation request | |
3903 | * we already have an unwritten extent | |
3904 | */ | |
556615dc | 3905 | if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { |
a25a4e1a | 3906 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
0031462b | 3907 | goto map_out; |
a25a4e1a | 3908 | } |
0031462b MC |
3909 | |
3910 | /* buffered READ or buffered write_begin() lookup */ | |
3911 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | |
3912 | /* | |
3913 | * We have blocks reserved already. We | |
3914 | * return allocated blocks so that delalloc | |
3915 | * won't do block reservation for us. But | |
3916 | * the buffer head will be unmapped so that | |
3917 | * a read from the block returns 0s. | |
3918 | */ | |
e35fd660 | 3919 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
0031462b MC |
3920 | goto out1; |
3921 | } | |
3922 | ||
be809e12 EW |
3923 | /* |
3924 | * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1. | |
3925 | * For buffered writes, at writepage time, etc. Convert a | |
3926 | * discovered unwritten extent to written. | |
3927 | */ | |
dfe50809 | 3928 | ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); |
be809e12 | 3929 | if (ret < 0) { |
0031462b MC |
3930 | err = ret; |
3931 | goto out2; | |
779e2651 | 3932 | } |
be809e12 EW |
3933 | ext4_update_inode_fsync_trans(handle, inode, 1); |
3934 | /* | |
3935 | * shouldn't get a 0 return when converting an unwritten extent | |
3936 | * unless m_len is 0 (bug) or extent has been corrupted | |
3937 | */ | |
3938 | if (unlikely(ret == 0)) { | |
3939 | EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u", | |
3940 | map->m_len); | |
3941 | err = -EFSCORRUPTED; | |
3942 | goto out2; | |
3943 | } | |
3944 | ||
779e2651 EW |
3945 | out: |
3946 | allocated = ret; | |
e35fd660 | 3947 | map->m_flags |= EXT4_MAP_NEW; |
0031462b | 3948 | map_out: |
e35fd660 | 3949 | map->m_flags |= EXT4_MAP_MAPPED; |
0031462b | 3950 | out1: |
bee6cf00 | 3951 | map->m_pblk = newblock; |
e35fd660 TT |
3952 | if (allocated > map->m_len) |
3953 | allocated = map->m_len; | |
e35fd660 | 3954 | map->m_len = allocated; |
bee6cf00 | 3955 | ext4_ext_show_leaf(inode, path); |
0031462b | 3956 | out2: |
0031462b MC |
3957 | return err ? err : allocated; |
3958 | } | |
58590b06 | 3959 | |
4d33b1ef TT |
3960 | /* |
3961 | * get_implied_cluster_alloc - check to see if the requested | |
3962 | * allocation (in the map structure) overlaps with a cluster already | |
3963 | * allocated in an extent. | |
d8990240 | 3964 | * @sb The filesystem superblock structure |
4d33b1ef TT |
3965 | * @map The requested lblk->pblk mapping |
3966 | * @ex The extent structure which might contain an implied | |
3967 | * cluster allocation | |
3968 | * | |
3969 | * This function is called by ext4_ext_map_blocks() after we failed to | |
3970 | * find blocks that were already in the inode's extent tree. Hence, | |
3971 | * we know that the beginning of the requested region cannot overlap | |
3972 | * the extent from the inode's extent tree. There are three cases we | |
3973 | * want to catch. The first is this case: | |
3974 | * | |
3975 | * |--- cluster # N--| | |
3976 | * |--- extent ---| |---- requested region ---| | |
3977 | * |==========| | |
3978 | * | |
3979 | * The second case that we need to test for is this one: | |
3980 | * | |
3981 | * |--------- cluster # N ----------------| | |
3982 | * |--- requested region --| |------- extent ----| | |
3983 | * |=======================| | |
3984 | * | |
3985 | * The third case is when the requested region lies between two extents | |
3986 | * within the same cluster: | |
3987 | * |------------- cluster # N-------------| | |
3988 | * |----- ex -----| |---- ex_right ----| | |
3989 | * |------ requested region ------| | |
3990 | * |================| | |
3991 | * | |
3992 | * In each of the above cases, we need to set the map->m_pblk and | |
3993 | * map->m_len so they correspond to the extent labelled as |
3994 | * "|====|" from cluster #N, since it is already in use for data in | |
3995 | * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to | |
3996 | * signal to ext4_ext_map_blocks() that map->m_pblk should be treated | |
3997 | * as a new "allocated" block region. Otherwise, we will return 0 and | |
3998 | * ext4_ext_map_blocks() will then allocate one or more new clusters | |
3999 | * by calling ext4_mb_new_blocks(). | |
4000 | */ | |
d8990240 | 4001 | static int get_implied_cluster_alloc(struct super_block *sb, |
4d33b1ef TT |
4002 | struct ext4_map_blocks *map, |
4003 | struct ext4_extent *ex, | |
4004 | struct ext4_ext_path *path) | |
4005 | { | |
d8990240 | 4006 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
f5a44db5 | 4007 | ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4d33b1ef | 4008 | ext4_lblk_t ex_cluster_start, ex_cluster_end; |
14d7f3ef | 4009 | ext4_lblk_t rr_cluster_start; |
4d33b1ef TT |
4010 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
4011 | ext4_fsblk_t ee_start = ext4_ext_pblock(ex); | |
4012 | unsigned short ee_len = ext4_ext_get_actual_len(ex); | |
4013 | ||
4014 | /* The extent passed in that we are trying to match */ | |
4015 | ex_cluster_start = EXT4_B2C(sbi, ee_block); | |
4016 | ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); | |
4017 | ||
4018 | /* The requested region passed into ext4_map_blocks() */ | |
4019 | rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); | |
4d33b1ef TT |
4020 | |
4021 | if ((rr_cluster_start == ex_cluster_end) || | |
4022 | (rr_cluster_start == ex_cluster_start)) { | |
4023 | if (rr_cluster_start == ex_cluster_end) | |
4024 | ee_start += ee_len - 1; | |
f5a44db5 | 4025 | map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; |
4d33b1ef TT |
4026 | map->m_len = min(map->m_len, |
4027 | (unsigned) sbi->s_cluster_ratio - c_offset); | |
4028 | /* | |
4029 | * Check for and handle this case: | |
4030 | * | |
4031 | * |--------- cluster # N-------------| | |
4032 | * |------- extent ----| | |
4033 | * |--- requested region ---| | |
4034 | * |===========| | |
4035 | */ | |
4036 | ||
4037 | if (map->m_lblk < ee_block) | |
4038 | map->m_len = min(map->m_len, ee_block - map->m_lblk); | |
4039 | ||
4040 | /* | |
4041 | * Check for the case where there is already another allocated | |
4042 | * block to the right of 'ex' but before the end of the cluster. | |
4043 | * | |
4044 | * |------------- cluster # N-------------| | |
4045 | * |----- ex -----| |---- ex_right ----| | |
4046 | * |------ requested region ------| | |
4047 | * |================| | |
4048 | */ | |
4049 | if (map->m_lblk > ee_block) { | |
4050 | ext4_lblk_t next = ext4_ext_next_allocated_block(path); | |
4051 | map->m_len = min(map->m_len, next - map->m_lblk); | |
4052 | } | |
d8990240 AK |
4053 | |
4054 | trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); | |
4d33b1ef TT |
4055 | return 1; |
4056 | } | |
d8990240 AK |
4057 | |
4058 | trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); | |
4d33b1ef TT |
4059 | return 0; |
4060 | } | |
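
The cluster arithmetic used above (EXT4_B2C(), EXT4_LBLK_COFF(), and friends) reduces to shifts and masks because the bigalloc cluster ratio is a power of two. A hedged standalone sketch, with cluster_bits standing in for the superblock's log2 cluster ratio:

/* illustrative bigalloc helpers; cluster_bits = log2(cluster ratio) */
static unsigned long long b2c(unsigned long long block, int cluster_bits)
{
	return block >> cluster_bits;		/* like EXT4_B2C() */
}

static unsigned long long c2b(unsigned long long cluster, int cluster_bits)
{
	return cluster << cluster_bits;		/* like EXT4_C2B() */
}

static unsigned int lblk_coff(unsigned int lblk, int cluster_bits)
{
	/* offset of a logical block within its cluster (EXT4_LBLK_COFF) */
	return lblk & ((1U << cluster_bits) - 1);
}
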
4061 | ||
4062 | ||
c278bfec | 4063 | /* |
f5ab0d1f MC |
4064 | * Block allocation/map/preallocation routine for extents based files |
4065 | * | |
4066 | * | |
c278bfec | 4067 | * Need to be called with |
0e855ac8 AK |
4068 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block |
4069 | * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) | |
f5ab0d1f | 4070 | * |
b483bb77 | 4071 | * return > 0, number of blocks already mapped/allocated |
f5ab0d1f MC |
4072 | * if create == 0 and these are pre-allocated blocks |
4073 | * buffer head is unmapped | |
4074 | * otherwise blocks are mapped | |
4075 | * | |
4076 | * return = 0, if plain look up failed (blocks have not been allocated) | |
4077 | * buffer head is unmapped | |
4078 | * | |
4079 | * return < 0, error case. | |
c278bfec | 4080 | */ |
e35fd660 TT |
4081 | int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
4082 | struct ext4_map_blocks *map, int flags) | |
a86c6181 AT |
4083 | { |
4084 | struct ext4_ext_path *path = NULL; | |
d7dce9e0 | 4085 | struct ext4_extent newex, *ex, ex2; |
4d33b1ef | 4086 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
8ad8d710 | 4087 | ext4_fsblk_t newblock = 0, pblk; |
34990461 | 4088 | int err = 0, depth, ret; |
4d33b1ef | 4089 | unsigned int allocated = 0, offset = 0; |
81fdbb4a | 4090 | unsigned int allocated_clusters = 0; |
c9de560d | 4091 | struct ext4_allocation_request ar; |
4d33b1ef | 4092 | ext4_lblk_t cluster_offset; |
a86c6181 | 4093 | |
70aa1554 | 4094 | ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len); |
0562e0ba | 4095 | trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); |
a86c6181 | 4096 | |
a86c6181 | 4097 | /* find extent for this block */ |
ed8a1a76 | 4098 | path = ext4_find_extent(inode, map->m_lblk, NULL, 0); |
a86c6181 AT |
4099 | if (IS_ERR(path)) { |
4100 | err = PTR_ERR(path); | |
4101 | path = NULL; | |
8ad8d710 | 4102 | goto out; |
a86c6181 AT |
4103 | } |
4104 | ||
4105 | depth = ext_depth(inode); | |
4106 | ||
4107 | /* | |
d0d856e8 RD |
4108 | * consistent leaf must not be empty; |
4109 | * this situation is possible, though, _during_ tree modification; | |
ed8a1a76 | 4110 | * this is why assert can't be put in ext4_find_extent() |
a86c6181 | 4111 | */ |
273df556 FM |
4112 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { |
4113 | EXT4_ERROR_INODE(inode, "bad extent address " | |
f70f362b TT |
4114 | "lblock: %lu, depth: %d pblock %lld", |
4115 | (unsigned long) map->m_lblk, depth, | |
4116 | path[depth].p_block); | |
6a797d27 | 4117 | err = -EFSCORRUPTED; |
8ad8d710 | 4118 | goto out; |
034fb4c9 | 4119 | } |
a86c6181 | 4120 | |
7e028976 AM |
4121 | ex = path[depth].p_ext; |
4122 | if (ex) { | |
725d26d3 | 4123 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
bf89d16f | 4124 | ext4_fsblk_t ee_start = ext4_ext_pblock(ex); |
a2df2a63 | 4125 | unsigned short ee_len; |
471d4011 | 4126 | |
b8a86845 | 4127 | |
471d4011 | 4128 | /* |
556615dc | 4129 | * unwritten extents are treated as holes, except that |
56055d3a | 4130 | * we split out initialized portions during a write. |
471d4011 | 4131 | */ |
a2df2a63 | 4132 | ee_len = ext4_ext_get_actual_len(ex); |
d8990240 AK |
4133 | |
4134 | trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); | |
4135 | ||
d0d856e8 | 4136 | /* if found extent covers block, simply return it */ |
e35fd660 TT |
4137 | if (in_range(map->m_lblk, ee_block, ee_len)) { |
4138 | newblock = map->m_lblk - ee_block + ee_start; | |
d0d856e8 | 4139 | /* number of remaining blocks in the extent */ |
e35fd660 | 4140 | allocated = ee_len - (map->m_lblk - ee_block); |
70aa1554 RH |
4141 | ext_debug(inode, "%u fit into %u:%d -> %llu\n", |
4142 | map->m_lblk, ee_block, ee_len, newblock); | |
56055d3a | 4143 | |
b8a86845 LC |
4144 | /* |
4145 | * If the extent is initialized check whether the | |
4146 | * caller wants to convert it to unwritten. | |
4147 | */ | |
556615dc | 4148 | if ((!ext4_ext_is_unwritten(ex)) && |
b8a86845 | 4149 | (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { |
f064a9d6 EW |
4150 | err = convert_initialized_extent(handle, |
4151 | inode, map, &path, &allocated); | |
8ad8d710 | 4152 | goto out; |
f064a9d6 | 4153 | } else if (!ext4_ext_is_unwritten(ex)) { |
8ad8d710 EW |
4154 | map->m_flags |= EXT4_MAP_MAPPED; |
4155 | map->m_pblk = newblock; | |
4156 | if (allocated > map->m_len) | |
4157 | allocated = map->m_len; | |
4158 | map->m_len = allocated; | |
4159 | ext4_ext_show_leaf(inode, path); | |
7877191c | 4160 | goto out; |
f064a9d6 | 4161 | } |
69eb33dc | 4162 | |
556615dc | 4163 | ret = ext4_ext_handle_unwritten_extents( |
dfe50809 | 4164 | handle, inode, map, &path, flags, |
7877191c | 4165 | allocated, newblock); |
ce37c429 EW |
4166 | if (ret < 0) |
4167 | err = ret; | |
4168 | else | |
4169 | allocated = ret; | |
8ad8d710 | 4170 | goto out; |
a86c6181 AT |
4171 | } |
4172 | } | |
4173 | ||
4174 | /* | |
d0d856e8 | 4175 | * the requested block isn't allocated yet;
a86c6181 AT |
4176 | * we must not try to create blocks if the create flag is zero
4177 | */ | |
c2177057 | 4178 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
140a5250 JK |
4179 | ext4_lblk_t hole_start, hole_len; |
4180 | ||
facab4d9 JK |
4181 | hole_start = map->m_lblk; |
4182 | hole_len = ext4_ext_determine_hole(inode, path, &hole_start); | |
56055d3a AA |
4183 | /* |
4184 | * put the just-found gap into the cache to speed up
4185 | * subsequent requests
4186 | */ | |
140a5250 | 4187 | ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); |
facab4d9 JK |
4188 | |
4189 | /* Update hole_len to reflect hole size after map->m_lblk */ | |
4190 | if (hole_start != map->m_lblk) | |
4191 | hole_len -= map->m_lblk - hole_start; | |
4192 | map->m_pblk = 0; | |
4193 | map->m_len = min_t(unsigned int, map->m_len, hole_len); | |
4194 | ||
8ad8d710 | 4195 | goto out; |
a86c6181 | 4196 | } |
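/*
 * Illustrative example (hypothetical numbers): if the hole containing
 * the request spans logical blocks 100..149 (hole_start == 100,
 * hole_len == 50) and map->m_lblk == 120, the whole hole [100, 50] is
 * cached above, hole_len is then trimmed to the 30 blocks remaining
 * after m_lblk, and map->m_len is clamped so the caller never maps
 * past the end of the hole.
 */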
4d33b1ef | 4197 | |
a86c6181 | 4198 | /* |
c2ea3fde | 4199 | * Okay, we need to do block allocation. |
63f57933 | 4200 | */ |
4d33b1ef | 4201 | newex.ee_block = cpu_to_le32(map->m_lblk); |
d0abafac | 4202 | cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4d33b1ef TT |
4203 | |
4204 | /* | |
4205 | * If we are doing bigalloc, check to see if the extent returned | |
ed8a1a76 | 4206 | * by ext4_find_extent() implies a cluster we can use. |
4d33b1ef TT |
4207 | */ |
4208 | if (cluster_offset && ex && | |
d8990240 | 4209 | get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { |
4d33b1ef TT |
4210 | ar.len = allocated = map->m_len; |
4211 | newblock = map->m_pblk; | |
4212 | goto got_allocated_blocks; | |
4213 | } | |
a86c6181 | 4214 | |
c9de560d | 4215 | /* find neighbour allocated blocks */ |
e35fd660 | 4216 | ar.lleft = map->m_lblk; |
c9de560d AT |
4217 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); |
4218 | if (err) | |
8ad8d710 | 4219 | goto out; |
e35fd660 | 4220 | ar.lright = map->m_lblk; |
4d33b1ef | 4221 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); |
d7dce9e0 | 4222 | if (err < 0) |
8ad8d710 | 4223 | goto out; |
25d14f98 | 4224 | |
4d33b1ef TT |
4225 | /* Check if the extent after searching to the right implies a |
4226 | * cluster we can use. */ | |
d7dce9e0 | 4227 | if ((sbi->s_cluster_ratio > 1) && err && |
4228 | get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) { | |
4d33b1ef TT |
4229 | ar.len = allocated = map->m_len; |
4230 | newblock = map->m_pblk; | |
4231 | goto got_allocated_blocks; | |
4232 | } | |
4233 | ||
749269fa AA |
4234 | /* |
4235 | * See if request is beyond maximum number of blocks we can have in | |
4236 | * a single extent. For an initialized extent this limit is | |
556615dc LC |
4237 | * EXT_INIT_MAX_LEN and for an unwritten extent this limit is |
4238 | * EXT_UNWRITTEN_MAX_LEN. | |
749269fa | 4239 | */ |
e35fd660 | 4240 | if (map->m_len > EXT_INIT_MAX_LEN && |
556615dc | 4241 | !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) |
e35fd660 | 4242 | map->m_len = EXT_INIT_MAX_LEN; |
556615dc LC |
4243 | else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && |
4244 | (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) | |
4245 | map->m_len = EXT_UNWRITTEN_MAX_LEN; | |
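/*
 * The arithmetic behind these limits (assuming the standard on-disk
 * encoding): ee_len is a 16-bit field whose high bit marks an extent
 * unwritten, so an initialized extent can span up to 32768 blocks
 * (EXT_INIT_MAX_LEN) while an unwritten one is capped at 32767
 * (EXT_UNWRITTEN_MAX_LEN). With 4 KiB blocks that is at most 128 MiB
 * per extent, which is why larger requests are clamped here.
 */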
749269fa | 4246 | |
e35fd660 | 4247 | /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ |
e35fd660 | 4248 | newex.ee_len = cpu_to_le16(map->m_len); |
4d33b1ef | 4249 | err = ext4_ext_check_overlap(sbi, inode, &newex, path); |
25d14f98 | 4250 | if (err) |
b939e376 | 4251 | allocated = ext4_ext_get_actual_len(&newex); |
25d14f98 | 4252 | else |
e35fd660 | 4253 | allocated = map->m_len; |
c9de560d AT |
4254 | |
4255 | /* allocate new block */ | |
4256 | ar.inode = inode; | |
e35fd660 TT |
4257 | ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); |
4258 | ar.logical = map->m_lblk; | |
4d33b1ef TT |
4259 | /* |
4260 | * We calculate the offset from the beginning of the cluster | |
4261 | * for the logical block number, since when we allocate a | |
4262 | * physical cluster, the physical block should start at the | |
4263 | * same offset from the beginning of the cluster. This is | |
4264 | * needed so that future calls to get_implied_cluster_alloc() | |
4265 | * work correctly. | |
4266 | */ | |
f5a44db5 | 4267 | offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4d33b1ef TT |
4268 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); |
4269 | ar.goal -= offset; | |
4270 | ar.logical -= offset; | |
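/*
 * Worked example (hypothetical, with a bigalloc cluster ratio of 16
 * blocks per cluster): for map->m_lblk == 35, EXT4_LBLK_COFF() gives
 * 35 % 16 == 3, so ar.goal and ar.logical are both pulled back by 3
 * blocks, and ar.len is rounded up to whole clusters by
 * EXT4_NUM_B2C(), e.g. 3 + 10 blocks -> 1 cluster.
 */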
c9de560d AT |
4271 | if (S_ISREG(inode->i_mode)) |
4272 | ar.flags = EXT4_MB_HINT_DATA; | |
4273 | else | |
4274 | /* disable in-core preallocation for non-regular files */ | |
4275 | ar.flags = 0; | |
556b27ab VH |
4276 | if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) |
4277 | ar.flags |= EXT4_MB_HINT_NOPREALLOC; | |
e3cf5d5d TT |
4278 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
4279 | ar.flags |= EXT4_MB_DELALLOC_RESERVED; | |
c5e298ae TT |
4280 | if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) |
4281 | ar.flags |= EXT4_MB_USE_RESERVED; | |
c9de560d | 4282 | newblock = ext4_mb_new_blocks(handle, &ar, &err); |
a86c6181 | 4283 | if (!newblock) |
8ad8d710 | 4284 | goto out; |
7b415bf6 | 4285 | allocated_clusters = ar.len; |
4d33b1ef | 4286 | ar.len = EXT4_C2B(sbi, ar.len) - offset; |
70aa1554 | 4287 | ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n", |
ec8c60be | 4288 | ar.goal, newblock, ar.len, allocated); |
4d33b1ef TT |
4289 | if (ar.len > allocated) |
4290 | ar.len = allocated; | |
a86c6181 | 4291 | |
4d33b1ef | 4292 | got_allocated_blocks: |
a86c6181 | 4293 | /* try to insert new extent into found leaf and return */ |
8ad8d710 EW |
4294 | pblk = newblock + offset; |
4295 | ext4_ext_store_pblock(&newex, pblk); | |
c9de560d | 4296 | newex.ee_len = cpu_to_le16(ar.len); |
556615dc | 4297 | /* Mark unwritten */ |
34990461 | 4298 | if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { |
556615dc | 4299 | ext4_ext_mark_unwritten(&newex); |
a25a4e1a | 4300 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
8d5d02e6 | 4301 | } |
c8d46e41 | 4302 | |
4337ecd1 | 4303 | err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); |
34990461 EW |
4304 | if (err) { |
4305 | if (allocated_clusters) { | |
4306 | int fb_flags = 0; | |
82e54229 | 4307 | |
34990461 EW |
4308 | /* |
4309 | * Free the data blocks we just allocated:
4310 | * it is not a good idea to call discard here directly,
4311 | * but otherwise we'd need to call it for every free().
4312 | */ | |
27bc446e | 4313 | ext4_discard_preallocations(inode, 0); |
34990461 EW |
4314 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
4315 | fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; | |
4316 | ext4_free_blocks(handle, inode, NULL, newblock, | |
4317 | EXT4_C2B(sbi, allocated_clusters), | |
4318 | fb_flags); | |
4319 | } | |
8ad8d710 | 4320 | goto out; |
315054f0 | 4321 | } |
a86c6181 | 4322 | |
5f634d06 | 4323 | /* |
b6bf9171 EW |
4324 | * Reduce the reserved cluster count to reflect successful deferred |
4325 | * allocation of delayed allocated clusters or direct allocation of | |
4326 | * clusters discovered to be delayed allocated. Once allocated, a | |
4327 | * cluster is not included in the reserved count. | |
5f634d06 | 4328 | */ |
2971148d | 4329 | if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) { |
b6bf9171 | 4330 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { |
232ec872 | 4331 | /* |
b6bf9171 EW |
4332 | * When allocating delayed allocated clusters, simply |
4333 | * reduce the reserved cluster count and claim quota | |
232ec872 LC |
4334 | */ |
4335 | ext4_da_update_reserve_space(inode, allocated_clusters, | |
4336 | 1); | |
b6bf9171 EW |
4337 | } else { |
4338 | ext4_lblk_t lblk, len; | |
4339 | unsigned int n; | |
4340 | ||
4341 | /* | |
4342 | * When allocating non-delayed allocated clusters | |
4343 | * (from fallocate, filemap, DIO, or clusters | |
4344 | * allocated when delalloc has been disabled by | |
4345 | * ext4_nonda_switch), reduce the reserved cluster | |
4346 | * count by the number of allocated clusters that | |
4347 | * have previously been delayed allocated. Quota | |
4348 | * has been claimed by ext4_mb_new_blocks() above, | |
4349 | * so release the quota reservations made for any | |
4350 | * previously delayed allocated clusters. | |
4351 | */ | |
4352 | lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); | |
4353 | len = allocated_clusters << sbi->s_cluster_bits; | |
4354 | n = ext4_es_delayed_clu(inode, lblk, len); | |
4355 | if (n > 0) | |
4356 | ext4_da_update_reserve_space(inode, (int) n, 0); | |
7b415bf6 AK |
4357 | } |
4358 | } | |
5f634d06 | 4359 | |
b436b9be JK |
4360 | /* |
4361 | * Cache the extent and update transaction to commit on fdatasync only | |
556615dc | 4362 | * when it is _not_ an unwritten extent. |
b436b9be | 4363 | */ |
556615dc | 4364 | if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) |
b436b9be | 4365 | ext4_update_inode_fsync_trans(handle, inode, 1); |
69eb33dc | 4366 | else |
b436b9be | 4367 | ext4_update_inode_fsync_trans(handle, inode, 0); |
8ad8d710 EW |
4368 | |
4369 | map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED); | |
4370 | map->m_pblk = pblk; | |
4371 | map->m_len = ar.len; | |
4372 | allocated = map->m_len; | |
a86c6181 | 4373 | ext4_ext_show_leaf(inode, path); |
8ad8d710 | 4374 | out: |
b7ea89ad TT |
4375 | ext4_ext_drop_refs(path); |
4376 | kfree(path); | |
e861304b | 4377 | |
63b99968 TT |
4378 | trace_ext4_ext_map_blocks_exit(inode, flags, map, |
4379 | err ? err : allocated); | |
7877191c | 4380 | return err ? err : allocated; |
a86c6181 AT |
4381 | } |
4382 | ||
d0abb36d | 4383 | int ext4_ext_truncate(handle_t *handle, struct inode *inode) |
a86c6181 | 4384 | { |
a86c6181 | 4385 | struct super_block *sb = inode->i_sb; |
725d26d3 | 4386 | ext4_lblk_t last_block; |
a86c6181 AT |
4387 | int err = 0; |
4388 | ||
a86c6181 | 4389 | /* |
d0d856e8 RD |
4390 | * TODO: optimization is possible here. |
4391 | * Probably we need not scan at all, | |
4392 | * because page truncation is enough. | |
a86c6181 | 4393 | */ |
a86c6181 AT |
4394 | |
4395 | /* we have to know where to truncate from in the crash case */
4396 | EXT4_I(inode)->i_disksize = inode->i_size; | |
d0abb36d TT |
4397 | err = ext4_mark_inode_dirty(handle, inode); |
4398 | if (err) | |
4399 | return err; | |
a86c6181 AT |
4400 | |
4401 | last_block = (inode->i_size + sb->s_blocksize - 1) | |
4402 | >> EXT4_BLOCK_SIZE_BITS(sb); | |
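/*
 * This is a round-up division. Illustrative example: with a 4096-byte
 * block size, i_size == 10000 gives last_block == (10000 + 4095) >> 12
 * == 3, so blocks 0..2 (which still hold data) are kept and extent
 * removal starts at block 3.
 */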
8acd5e9b | 4403 | retry: |
51865fda ZL |
4404 | err = ext4_es_remove_extent(inode, last_block, |
4405 | EXT_MAX_BLOCKS - last_block); | |
94eec0fc | 4406 | if (err == -ENOMEM) { |
4034247a | 4407 | memalloc_retry_wait(GFP_ATOMIC); |
8acd5e9b TT |
4408 | goto retry; |
4409 | } | |
d0abb36d TT |
4410 | if (err) |
4411 | return err; | |
73c384c0 TT |
4412 | retry_remove_space: |
4413 | err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); | |
4414 | if (err == -ENOMEM) { | |
4034247a | 4415 | memalloc_retry_wait(GFP_ATOMIC); |
73c384c0 TT |
4416 | goto retry_remove_space; |
4417 | } | |
4418 | return err; | |
a86c6181 AT |
4419 | } |
4420 | ||
0e8b6879 | 4421 | static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, |
c174e6d6 | 4422 | ext4_lblk_t len, loff_t new_size, |
77a2e84d | 4423 | int flags) |
0e8b6879 LC |
4424 | { |
4425 | struct inode *inode = file_inode(file); | |
4426 | handle_t *handle; | |
64395d95 | 4427 | int ret = 0, ret2 = 0, ret3 = 0; |
0e8b6879 | 4428 | int retries = 0; |
4134f5c8 | 4429 | int depth = 0; |
0e8b6879 LC |
4430 | struct ext4_map_blocks map; |
4431 | unsigned int credits; | |
c174e6d6 | 4432 | loff_t epos; |
0e8b6879 | 4433 | |
c3fe493c | 4434 | BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); |
0e8b6879 | 4435 | map.m_lblk = offset; |
c174e6d6 | 4436 | map.m_len = len; |
0e8b6879 LC |
4437 | /* |
4438 | * Don't normalize the request if it can fit in one extent so | |
4439 | * that it doesn't get unnecessarily split into multiple | |
4440 | * extents. | |
4441 | */ | |
556615dc | 4442 | if (len <= EXT_UNWRITTEN_MAX_LEN) |
0e8b6879 LC |
4443 | flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; |
4444 | ||
4445 | /* | |
4446 | * credits to insert 1 extent into extent tree | |
4447 | */ | |
4448 | credits = ext4_chunk_trans_blocks(inode, len); | |
c3fe493c | 4449 | depth = ext_depth(inode); |
0e8b6879 LC |
4450 | |
4451 | retry: | |
3258386a | 4452 | while (len) { |
4134f5c8 LC |
4453 | /* |
4454 | * Recalculate credits when extent tree depth changes. | |
4455 | */ | |
011c88e3 | 4456 | if (depth != ext_depth(inode)) { |
4134f5c8 LC |
4457 | credits = ext4_chunk_trans_blocks(inode, len); |
4458 | depth = ext_depth(inode); | |
4459 | } | |
4460 | ||
0e8b6879 LC |
4461 | handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, |
4462 | credits); | |
4463 | if (IS_ERR(handle)) { | |
4464 | ret = PTR_ERR(handle); | |
4465 | break; | |
4466 | } | |
4467 | ret = ext4_map_blocks(handle, inode, &map, flags); | |
4468 | if (ret <= 0) { | |
4469 | ext4_debug("inode #%lu: block %u: len %u: " | |
4470 | "ext4_ext_map_blocks returned %d", | |
4471 | inode->i_ino, map.m_lblk, | |
4472 | map.m_len, ret); | |
4473 | ext4_mark_inode_dirty(handle, inode); | |
3258386a | 4474 | ext4_journal_stop(handle); |
0e8b6879 LC |
4475 | break; |
4476 | } | |
3258386a EW |
4477 | /* |
4478 | * allow a full retry cycle for any remaining allocations | |
4479 | */ | |
4480 | retries = 0; | |
c174e6d6 DM |
4481 | map.m_lblk += ret; |
4482 | map.m_len = len = len - ret; | |
4483 | epos = (loff_t)map.m_lblk << inode->i_blkbits; | |
eeca7ea1 | 4484 | inode->i_ctime = current_time(inode); |
c174e6d6 DM |
4485 | if (new_size) { |
4486 | if (epos > new_size) | |
4487 | epos = new_size; | |
4488 | if (ext4_update_inode_size(inode, epos) & 0x1) | |
4489 | inode->i_mtime = inode->i_ctime; | |
c174e6d6 | 4490 | } |
4209ae12 | 4491 | ret2 = ext4_mark_inode_dirty(handle, inode); |
c894aa97 | 4492 | ext4_update_inode_fsync_trans(handle, inode, 1); |
4209ae12 HS |
4493 | ret3 = ext4_journal_stop(handle); |
4494 | ret2 = ret3 ? ret3 : ret2; | |
4495 | if (unlikely(ret2)) | |
0e8b6879 LC |
4496 | break; |
4497 | } | |
3258386a | 4498 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
0e8b6879 | 4499 | goto retry; |
0e8b6879 LC |
4500 | |
4501 | return ret > 0 ? ret2 : ret; | |
4502 | } | |
4503 | ||
43f81677 EB |
4504 | static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); |
4505 | ||
4506 | static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len); | |
4507 | ||
b8a86845 LC |
4508 | static long ext4_zero_range(struct file *file, loff_t offset, |
4509 | loff_t len, int mode) | |
4510 | { | |
4511 | struct inode *inode = file_inode(file); | |
d4f5258e | 4512 | struct address_space *mapping = file->f_mapping; |
b8a86845 LC |
4513 | handle_t *handle = NULL; |
4514 | unsigned int max_blocks; | |
4515 | loff_t new_size = 0; | |
4516 | int ret = 0; | |
4517 | int flags; | |
69dc9536 | 4518 | int credits; |
c174e6d6 | 4519 | int partial_begin, partial_end; |
b8a86845 LC |
4520 | loff_t start, end; |
4521 | ext4_lblk_t lblk; | |
b8a86845 LC |
4522 | unsigned int blkbits = inode->i_blkbits; |
4523 | ||
4524 | trace_ext4_zero_range(inode, offset, len, mode); | |
4525 | ||
e1ee60fd NJ |
4526 | /* Call ext4_force_commit to flush all data in case of data=journal. */ |
4527 | if (ext4_should_journal_data(inode)) { | |
4528 | ret = ext4_force_commit(inode->i_sb); | |
4529 | if (ret) | |
4530 | return ret; | |
4531 | } | |
4532 | ||
b8a86845 | 4533 | /* |
e4d7f2d3 | 4534 | * Round up the offset. This is not fallocate; we need to zero out
b8a86845 LC |
4535 | * blocks, so convert the interior block-aligned part of the range to
4536 | * unwritten and possibly manually zero out the unaligned parts of the
4537 | * range.
4538 | */ | |
4539 | start = round_up(offset, 1 << blkbits); | |
4540 | end = round_down((offset + len), 1 << blkbits); | |
4541 | ||
4542 | if (start < offset || end > offset + len) | |
4543 | return -EINVAL; | |
c174e6d6 DM |
4544 | partial_begin = offset & ((1 << blkbits) - 1); |
4545 | partial_end = (offset + len) & ((1 << blkbits) - 1); | |
b8a86845 LC |
4546 | |
4547 | lblk = start >> blkbits; | |
4548 | max_blocks = (end >> blkbits); | |
4549 | if (max_blocks < lblk) | |
4550 | max_blocks = 0; | |
4551 | else | |
4552 | max_blocks -= lblk; | |
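/*
 * Worked example (hypothetical, 4 KiB blocks): offset == 1000 and
 * len == 5000 give start == 4096 and end == 4096, so max_blocks ends
 * up 0 and there is no block-aligned interior to convert; both
 * partial_begin (1000) and partial_end (1904) are non-zero, so only
 * the edge-zeroing paths below run.
 */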
4553 | ||
5955102c | 4554 | inode_lock(inode); |
b8a86845 LC |
4555 | |
4556 | /* | |
80dd4978 | 4557 | * Indirect files do not support unwritten extents |
b8a86845 LC |
4558 | */ |
4559 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { | |
4560 | ret = -EOPNOTSUPP; | |
4561 | goto out_mutex; | |
4562 | } | |
4563 | ||
4564 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | |
9b02e498 | 4565 | (offset + len > inode->i_size || |
51e3ae81 | 4566 | offset + len > EXT4_I(inode)->i_disksize)) { |
b8a86845 LC |
4567 | new_size = offset + len; |
4568 | ret = inode_newsize_ok(inode, new_size); | |
4569 | if (ret) | |
4570 | goto out_mutex; | |
b8a86845 LC |
4571 | } |
4572 | ||
0f2af21a | 4573 | flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; |
0f2af21a | 4574 | |
17048e8a | 4575 | /* Wait for all existing DIO workers; newcomers will block on i_mutex */
17048e8a JK |
4576 | inode_dio_wait(inode); |
4577 | ||
0f2af21a LC |
4578 | /* Preallocate the range including the unaligned edges */ |
4579 | if (partial_begin || partial_end) { | |
4580 | ret = ext4_alloc_file_blocks(file, | |
4581 | round_down(offset, 1 << blkbits) >> blkbits, | |
4582 | (round_up((offset + len), 1 << blkbits) - | |
4583 | round_down(offset, 1 << blkbits)) >> blkbits, | |
77a2e84d | 4584 | new_size, flags); |
0f2af21a | 4585 | if (ret) |
1d39834f | 4586 | goto out_mutex; |
0f2af21a LC |
4587 | |
4588 | } | |
4589 | ||
4590 | /* Zero range excluding the unaligned edges */ | |
b8a86845 | 4591 | if (max_blocks > 0) { |
0f2af21a LC |
4592 | flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | |
4593 | EXT4_EX_NOCACHE); | |
b8a86845 | 4594 | |
ea3d7209 JK |
4595 | /* |
4596 | * Prevent page faults from reinstantiating pages we have | |
4597 | * released from page cache. | |
4598 | */ | |
d4f5258e | 4599 | filemap_invalidate_lock(mapping); |
430657b6 RZ |
4600 | |
4601 | ret = ext4_break_layouts(inode); | |
4602 | if (ret) { | |
d4f5258e | 4603 | filemap_invalidate_unlock(mapping); |
430657b6 RZ |
4604 | goto out_mutex; |
4605 | } | |
4606 | ||
01127848 JK |
4607 | ret = ext4_update_disksize_before_punch(inode, offset, len); |
4608 | if (ret) { | |
d4f5258e | 4609 | filemap_invalidate_unlock(mapping); |
1d39834f | 4610 | goto out_mutex; |
01127848 | 4611 | } |
ea3d7209 JK |
4612 | /* Now release the pages and zero block aligned part of pages */ |
4613 | truncate_pagecache_range(inode, start, end - 1); | |
eeca7ea1 | 4614 | inode->i_mtime = inode->i_ctime = current_time(inode); |
ea3d7209 | 4615 | |
713e8dde | 4616 | ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, |
77a2e84d | 4617 | flags); |
d4f5258e | 4618 | filemap_invalidate_unlock(mapping); |
713e8dde | 4619 | if (ret) |
1d39834f | 4620 | goto out_mutex; |
b8a86845 | 4621 | } |
c174e6d6 | 4622 | if (!partial_begin && !partial_end) |
1d39834f | 4623 | goto out_mutex; |
c174e6d6 | 4624 | |
69dc9536 DM |
4625 | /* |
4626 | * In the worst case we have to write out two nonadjacent unwritten
4627 | * blocks and update the inode
4628 | */ | |
4629 | credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; | |
4630 | if (ext4_should_journal_data(inode)) | |
4631 | credits += 2; | |
4632 | handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); | |
b8a86845 LC |
4633 | if (IS_ERR(handle)) { |
4634 | ret = PTR_ERR(handle); | |
4635 | ext4_std_error(inode->i_sb, ret); | |
1d39834f | 4636 | goto out_mutex; |
b8a86845 LC |
4637 | } |
4638 | ||
eeca7ea1 | 4639 | inode->i_mtime = inode->i_ctime = current_time(inode); |
4337ecd1 | 4640 | if (new_size) |
4631dbf6 | 4641 | ext4_update_inode_size(inode, new_size); |
4209ae12 HS |
4642 | ret = ext4_mark_inode_dirty(handle, inode); |
4643 | if (unlikely(ret)) | |
4644 | goto out_handle; | |
b8a86845 LC |
4645 | /* Zero out partial block at the edges of the range */ |
4646 | ret = ext4_zero_partial_blocks(handle, inode, offset, len); | |
67a7d5f5 JK |
4647 | if (ret >= 0) |
4648 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
b8a86845 LC |
4649 | |
4650 | if (file->f_flags & O_SYNC) | |
4651 | ext4_handle_sync(handle); | |
4652 | ||
4209ae12 | 4653 | out_handle: |
b8a86845 | 4654 | ext4_journal_stop(handle); |
b8a86845 | 4655 | out_mutex: |
5955102c | 4656 | inode_unlock(inode); |
b8a86845 LC |
4657 | return ret; |
4658 | } | |
4659 | ||
a2df2a63 | 4660 | /* |
2fe17c10 | 4661 | * Preallocate space for a file. This implements ext4's fallocate file
a2df2a63 AA |
4662 | * operation, which gets called from the sys_fallocate system call.
4663 | * For block-mapped files, posix_fallocate should fall back to the method
4664 | * of writing zeroes to the required new blocks (the same behavior that is
4665 | * expected for file systems which do not support the fallocate() system call).
4666 | */ | |
2fe17c10 | 4667 | long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) |
a2df2a63 | 4668 | { |
496ad9aa | 4669 | struct inode *inode = file_inode(file); |
f282ac19 | 4670 | loff_t new_size = 0; |
498e5f24 | 4671 | unsigned int max_blocks; |
a2df2a63 | 4672 | int ret = 0; |
a4e5d88b | 4673 | int flags; |
0e8b6879 | 4674 | ext4_lblk_t lblk; |
0e8b6879 | 4675 | unsigned int blkbits = inode->i_blkbits; |
a2df2a63 | 4676 | |
2058f83a MH |
4677 | /* |
4678 | * Encrypted inodes can't handle collapse range or insert | |
4679 | * range since we would need to re-encrypt blocks with a | |
4680 | * different IV or XTS tweak (which are based on the logical | |
4681 | * block number). | |
2058f83a | 4682 | */ |
592ddec7 | 4683 | if (IS_ENCRYPTED(inode) && |
457b1e35 | 4684 | (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) |
2058f83a MH |
4685 | return -EOPNOTSUPP; |
4686 | ||
a4bb6b64 | 4687 | /* Return error if mode is not supported */ |
9eb79482 | 4688 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | |
331573fe NJ |
4689 | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | |
4690 | FALLOC_FL_INSERT_RANGE)) | |
a4bb6b64 AH |
4691 | return -EOPNOTSUPP; |
4692 | ||
aa75f4d3 HS |
4693 | if (mode & FALLOC_FL_PUNCH_HOLE) { |
4694 | ret = ext4_punch_hole(inode, offset, len); | |
4695 | goto exit; | |
4696 | } | |
a4bb6b64 | 4697 | |
0c8d414f TM |
4698 | ret = ext4_convert_inline_data(inode); |
4699 | if (ret) | |
aa75f4d3 | 4700 | goto exit; |
0c8d414f | 4701 | |
aa75f4d3 HS |
4702 | if (mode & FALLOC_FL_COLLAPSE_RANGE) { |
4703 | ret = ext4_collapse_range(inode, offset, len); | |
4704 | goto exit; | |
4705 | } | |
331573fe | 4706 | |
aa75f4d3 HS |
4707 | if (mode & FALLOC_FL_INSERT_RANGE) { |
4708 | ret = ext4_insert_range(inode, offset, len); | |
4709 | goto exit; | |
4710 | } | |
b8a86845 | 4711 | |
aa75f4d3 HS |
4712 | if (mode & FALLOC_FL_ZERO_RANGE) { |
4713 | ret = ext4_zero_range(file, offset, len, mode); | |
4714 | goto exit; | |
4715 | } | |
0562e0ba | 4716 | trace_ext4_fallocate_enter(inode, offset, len, mode); |
0e8b6879 | 4717 | lblk = offset >> blkbits; |
0e8b6879 | 4718 | |
518eaa63 | 4719 | max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); |
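/*
 * EXT4_MAX_BLOCKS() counts the blocks needed to cover the byte range
 * [offset, offset + len). Illustrative example with 4 KiB blocks:
 * offset == 1000 and len == 5000 cover bytes 1000..5999, i.e. blocks
 * 0 and 1, so lblk == 0 and max_blocks == 2.
 */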
556615dc | 4720 | flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; |
0e8b6879 | 4721 | |
5955102c | 4722 | inode_lock(inode); |
f282ac19 | 4723 | |
280227a7 DI |
4724 | /* |
4725 | * We only support preallocation for extent-based files
4726 | */ | |
4727 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { | |
4728 | ret = -EOPNOTSUPP; | |
4729 | goto out; | |
4730 | } | |
4731 | ||
f282ac19 | 4732 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
9b02e498 | 4733 | (offset + len > inode->i_size || |
51e3ae81 | 4734 | offset + len > EXT4_I(inode)->i_disksize)) { |
f282ac19 LC |
4735 | new_size = offset + len; |
4736 | ret = inode_newsize_ok(inode, new_size); | |
4737 | if (ret) | |
4738 | goto out; | |
6d19c42b | 4739 | } |
f282ac19 | 4740 | |
17048e8a | 4741 | /* Wait for all existing DIO workers; newcomers will block on i_mutex */
17048e8a JK |
4742 | inode_dio_wait(inode); |
4743 | ||
77a2e84d | 4744 | ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); |
0e8b6879 LC |
4745 | if (ret) |
4746 | goto out; | |
f282ac19 | 4747 | |
c174e6d6 | 4748 | if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { |
aa75f4d3 HS |
4749 | ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, |
4750 | EXT4_I(inode)->i_sync_tid); | |
f282ac19 | 4751 | } |
f282ac19 | 4752 | out: |
5955102c | 4753 | inode_unlock(inode); |
0e8b6879 | 4754 | trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); |
aa75f4d3 | 4755 | exit: |
0e8b6879 | 4756 | return ret; |
a2df2a63 | 4757 | } |
6873fa0d | 4758 | |
0031462b MC |
4759 | /* |
4760 | * This function converts a range of blocks to written extents.
4761 | * The caller passes the start offset and the size;
4762 | * all unwritten extents within this range will be converted to
4763 | * written extents.
4764 | *
4765 | * This function is called from the direct I/O end_io callback
4766 | * to convert the fallocated extents after I/O is completed.
109f5565 | 4767 | * Returns 0 on success. |
0031462b | 4768 | */ |
6b523df4 JK |
4769 | int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, |
4770 | loff_t offset, ssize_t len) | |
0031462b | 4771 | { |
0031462b | 4772 | unsigned int max_blocks; |
4209ae12 | 4773 | int ret = 0, ret2 = 0, ret3 = 0; |
2ed88685 | 4774 | struct ext4_map_blocks map; |
a00713ea RH |
4775 | unsigned int blkbits = inode->i_blkbits; |
4776 | unsigned int credits = 0; | |
0031462b | 4777 | |
2ed88685 | 4778 | map.m_lblk = offset >> blkbits; |
518eaa63 FF |
4779 | max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); |
4780 | ||
a00713ea | 4781 | if (!handle) { |
6b523df4 JK |
4782 | /* |
4783 | * credits to insert 1 extent into extent tree | |
4784 | */ | |
4785 | credits = ext4_chunk_trans_blocks(inode, max_blocks); | |
4786 | } | |
0031462b | 4787 | while (ret >= 0 && ret < max_blocks) { |
2ed88685 TT |
4788 | map.m_lblk += ret; |
4789 | map.m_len = (max_blocks -= ret); | |
6b523df4 JK |
4790 | if (credits) { |
4791 | handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, | |
4792 | credits); | |
4793 | if (IS_ERR(handle)) { | |
4794 | ret = PTR_ERR(handle); | |
4795 | break; | |
4796 | } | |
0031462b | 4797 | } |
2ed88685 | 4798 | ret = ext4_map_blocks(handle, inode, &map, |
c7064ef1 | 4799 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); |
b06acd38 LC |
4800 | if (ret <= 0) |
4801 | ext4_warning(inode->i_sb, | |
4802 | "inode #%lu: block %u: len %u: " | |
4803 | "ext4_ext_map_blocks returned %d", | |
4804 | inode->i_ino, map.m_lblk, | |
4805 | map.m_len, ret); | |
4209ae12 HS |
4806 | ret2 = ext4_mark_inode_dirty(handle, inode); |
4807 | if (credits) { | |
4808 | ret3 = ext4_journal_stop(handle); | |
4809 | if (unlikely(ret3)) | |
4810 | ret2 = ret3; | |
4811 | } | |
4812 | ||
6b523df4 | 4813 | if (ret <= 0 || ret2) |
0031462b MC |
4814 | break; |
4815 | } | |
4816 | return ret > 0 ? ret2 : ret; | |
4817 | } | |
6d9c85eb | 4818 | |
a00713ea RH |
4819 | int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) |
4820 | { | |
d1e18b88 | 4821 | int ret = 0, err = 0; |
c8cc8816 | 4822 | struct ext4_io_end_vec *io_end_vec; |
a00713ea RH |
4823 | |
4824 | /* | |
4825 | * This is somewhat ugly but the idea is clear: when a transaction is
4826 | * reserved, everything goes into it. Otherwise we would rather start
4827 | * several smaller transactions, converting each extent separately.
4828 | */ | |
4829 | if (handle) { | |
4830 | handle = ext4_journal_start_reserved(handle, | |
4831 | EXT4_HT_EXT_CONVERT); | |
4832 | if (IS_ERR(handle)) | |
4833 | return PTR_ERR(handle); | |
4834 | } | |
4835 | ||
c8cc8816 RH |
4836 | list_for_each_entry(io_end_vec, &io_end->list_vec, list) { |
4837 | ret = ext4_convert_unwritten_extents(handle, io_end->inode, | |
4838 | io_end_vec->offset, | |
4839 | io_end_vec->size); | |
4840 | if (ret) | |
4841 | break; | |
4842 | } | |
4843 | ||
a00713ea RH |
4844 | if (handle) |
4845 | err = ext4_journal_stop(handle); | |
4846 | ||
4847 | return ret < 0 ? ret : err; | |
4848 | } | |
4849 | ||
d3b6f23f | 4850 | static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap) |
6873fa0d ES |
4851 | { |
4852 | __u64 physical = 0; | |
d3b6f23f | 4853 | __u64 length = 0; |
6873fa0d ES |
4854 | int blockbits = inode->i_sb->s_blocksize_bits; |
4855 | int error = 0; | |
d3b6f23f | 4856 | u16 iomap_type; |
6873fa0d ES |
4857 | |
4858 | /* in-inode? */ | |
19f5fb7a | 4859 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
6873fa0d ES |
4860 | struct ext4_iloc iloc; |
4861 | int offset; /* offset of xattr in inode */ | |
4862 | ||
4863 | error = ext4_get_inode_loc(inode, &iloc); | |
4864 | if (error) | |
4865 | return error; | |
a60697f4 | 4866 | physical = (__u64)iloc.bh->b_blocknr << blockbits; |
6873fa0d ES |
4867 | offset = EXT4_GOOD_OLD_INODE_SIZE + |
4868 | EXT4_I(inode)->i_extra_isize; | |
4869 | physical += offset; | |
4870 | length = EXT4_SB(inode->i_sb)->s_inode_size - offset; | |
fd2dd9fb | 4871 | brelse(iloc.bh); |
d3b6f23f RH |
4872 | iomap_type = IOMAP_INLINE; |
4873 | } else if (EXT4_I(inode)->i_file_acl) { /* external block */ | |
a60697f4 | 4874 | physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; |
6873fa0d | 4875 | length = inode->i_sb->s_blocksize; |
d3b6f23f RH |
4876 | iomap_type = IOMAP_MAPPED; |
4877 | } else { | |
4878 | /* no in-inode or external block for xattr, so return -ENOENT */ | |
4879 | error = -ENOENT; | |
4880 | goto out; | |
6873fa0d ES |
4881 | } |
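/*
 * At this point (physical, length) describe the xattr area in bytes.
 * Illustrative example (hypothetical numbers): with a 4096-byte block
 * size, a 256-byte on-disk inode located in block 1234 and
 * i_extra_isize == 32, the in-inode branch above computes
 * physical == 1234 * 4096 + (128 + 32) and length == 256 - 160 == 96.
 */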
4882 | ||
d3b6f23f RH |
4883 | iomap->addr = physical; |
4884 | iomap->offset = 0; | |
4885 | iomap->length = length; | |
4886 | iomap->type = iomap_type; | |
4887 | iomap->flags = 0; | |
4888 | out: | |
4889 | return error; | |
6873fa0d ES |
4890 | } |
4891 | ||
d3b6f23f RH |
4892 | static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, |
4893 | loff_t length, unsigned flags, | |
4894 | struct iomap *iomap, struct iomap *srcmap) | |
6873fa0d | 4895 | { |
d3b6f23f | 4896 | int error; |
bb5835ed | 4897 | |
d3b6f23f RH |
4898 | error = ext4_iomap_xattr_fiemap(inode, iomap); |
4899 | if (error == 0 && (offset >= iomap->length)) | |
4900 | error = -ENOENT; | |
4901 | return error; | |
4902 | } | |
94191985 | 4903 | |
d3b6f23f RH |
4904 | static const struct iomap_ops ext4_iomap_xattr_ops = { |
4905 | .iomap_begin = ext4_iomap_xattr_begin, | |
4906 | }; | |
94191985 | 4907 | |
328e24ae CH |
4908 | static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) |
4909 | { | |
4910 | u64 maxbytes; | |
4911 | ||
4912 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | |
4913 | maxbytes = inode->i_sb->s_maxbytes; | |
4914 | else | |
4915 | maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; | |
4916 | ||
4917 | if (*len == 0) | |
4918 | return -EINVAL; | |
4919 | if (start > maxbytes) | |
4920 | return -EFBIG; | |
4921 | ||
4922 | /* | |
4923 | * Shrink request scope to what the fs can actually handle. | |
4924 | */ | |
4925 | if (*len > maxbytes || (maxbytes - *len) < start) | |
4926 | *len = maxbytes - start; | |
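/*
 * Illustrative example: if maxbytes were 1000, a request with
 * start == 600 and *len == 700 fails the (maxbytes - *len) < start
 * test and is trimmed to *len == 400, so that start + *len never
 * exceeds maxbytes.
 */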
4927 | return 0; | |
4928 | } | |
4929 | ||
03a5ed24 CH |
4930 | int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
4931 | u64 start, u64 len) | |
d3b6f23f | 4932 | { |
d3b6f23f | 4933 | int error = 0; |
94191985 | 4934 | |
7869a4a6 TT |
4935 | if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { |
4936 | error = ext4_ext_precache(inode); | |
4937 | if (error) | |
4938 | return error; | |
bb5835ed | 4939 | fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; |
7869a4a6 TT |
4940 | } |
4941 | ||
328e24ae CH |
4942 | /* |
4943 | * For bitmap files the maximum size limit could be smaller than | |
4944 | * s_maxbytes, so check len here manually instead of just relying on the | |
4945 | * generic check. | |
4946 | */ | |
4947 | error = ext4_fiemap_check_ranges(inode, start, &len); | |
4948 | if (error) | |
4949 | return error; | |
4950 | ||
6873fa0d | 4951 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { |
d3b6f23f | 4952 | fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; |
03a5ed24 CH |
4953 | return iomap_fiemap(inode, fieinfo, start, len, |
4954 | &ext4_iomap_xattr_ops); | |
6873fa0d | 4955 | } |
9eb79482 | 4956 | |
03a5ed24 | 4957 | return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops); |
bb5835ed TT |
4958 | } |
4959 | ||
4960 | int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, | |
4961 | __u64 start, __u64 len) | |
4962 | { | |
03a5ed24 CH |
4963 | ext4_lblk_t start_blk, len_blks; |
4964 | __u64 last_blk; | |
4965 | int error = 0; | |
4966 | ||
bb5835ed TT |
4967 | if (ext4_has_inline_data(inode)) { |
4968 | int has_inline; | |
4969 | ||
4970 | down_read(&EXT4_I(inode)->xattr_sem); | |
4971 | has_inline = ext4_has_inline_data(inode); | |
4972 | up_read(&EXT4_I(inode)->xattr_sem); | |
4973 | if (has_inline) | |
4974 | return 0; | |
4975 | } | |
4976 | ||
03a5ed24 CH |
4977 | if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { |
4978 | error = ext4_ext_precache(inode); | |
4979 | if (error) | |
4980 | return error; | |
4981 | fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; | |
4982 | } | |
4983 | ||
45dd052e | 4984 | error = fiemap_prep(inode, fieinfo, start, &len, 0); |
cddf8a2c CH |
4985 | if (error) |
4986 | return error; | |
bb5835ed | 4987 | |
03a5ed24 CH |
4988 | error = ext4_fiemap_check_ranges(inode, start, &len); |
4989 | if (error) | |
4990 | return error; | |
4991 | ||
4992 | start_blk = start >> inode->i_sb->s_blocksize_bits; | |
4993 | last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; | |
4994 | if (last_blk >= EXT_MAX_BLOCKS) | |
4995 | last_blk = EXT_MAX_BLOCKS-1; | |
4996 | len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; | |
4997 | ||
4998 | /* | |
4999 | * Walk the extent tree gathering extent information | |
5000 | * and pushing extents back to the user. | |
5001 | */ | |
5002 | return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo); | |
5003 | } | |
bb5835ed | 5004 | |
9eb79482 NJ |
5005 | /* |
5006 | * ext4_ext_shift_path_extents: | |
5007 | * Shift the extents of a path structure lying between path[depth].p_ext | |
331573fe NJ |
5008 | * and EXT_LAST_EXTENT(path[depth].p_hdr) by @shift blocks. @SHIFT tells
5009 | * whether it is a right-shift or a left-shift operation.
9eb79482 NJ |
5010 | */ |
5011 | static int | |
5012 | ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, | |
5013 | struct inode *inode, handle_t *handle, | |
331573fe | 5014 | enum SHIFT_DIRECTION SHIFT) |
9eb79482 NJ |
5015 | { |
5016 | int depth, err = 0; | |
5017 | struct ext4_extent *ex_start, *ex_last; | |
4756ee18 | 5018 | bool update = false; |
4268496e | 5019 | int credits, restart_credits; |
9eb79482 NJ |
5020 | depth = path->p_depth; |
5021 | ||
5022 | while (depth >= 0) { | |
5023 | if (depth == path->p_depth) { | |
5024 | ex_start = path[depth].p_ext; | |
5025 | if (!ex_start) | |
6a797d27 | 5026 | return -EFSCORRUPTED; |
9eb79482 NJ |
5027 | |
5028 | ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); | |
4268496e | 5029 | /* leaf + sb + inode */ |
5030 | credits = 3; | |
5031 | if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) { | |
5032 | update = true; | |
5033 | /* extent tree + sb + inode */ | |
5034 | credits = depth + 2; | |
5035 | } | |
9eb79482 | 5036 | |
4268496e | 5037 | restart_credits = ext4_writepage_trans_blocks(inode); |
5038 | err = ext4_datasem_ensure_credits(handle, inode, credits, | |
5039 | restart_credits, 0); | |
1811bc40 | 5040 | if (err) { |
5041 | if (err > 0) | |
5042 | err = -EAGAIN; | |
9eb79482 | 5043 | goto out; |
1811bc40 | 5044 | } |
9eb79482 | 5045 | |
4268496e | 5046 | err = ext4_ext_get_access(handle, inode, path + depth); |
5047 | if (err) | |
5048 | goto out; | |
9eb79482 | 5049 | |
9eb79482 | 5050 | while (ex_start <= ex_last) { |
331573fe NJ |
5051 | if (SHIFT == SHIFT_LEFT) { |
5052 | le32_add_cpu(&ex_start->ee_block, | |
5053 | -shift); | |
5054 | /* Try to merge to the left. */ | |
5055 | if ((ex_start > | |
5056 | EXT_FIRST_EXTENT(path[depth].p_hdr)) | |
5057 | && | |
5058 | ext4_ext_try_to_merge_right(inode, | |
5059 | path, ex_start - 1)) | |
5060 | ex_last--; | |
5061 | else | |
5062 | ex_start++; | |
5063 | } else { | |
5064 | le32_add_cpu(&ex_last->ee_block, shift); | |
5065 | ext4_ext_try_to_merge_right(inode, path, | |
5066 | ex_last); | |
6dd834ef | 5067 | ex_last--; |
331573fe | 5068 | } |
9eb79482 NJ |
5069 | } |
5070 | err = ext4_ext_dirty(handle, inode, path + depth); | |
5071 | if (err) | |
5072 | goto out; | |
5073 | ||
5074 | if (--depth < 0 || !update) | |
5075 | break; | |
5076 | } | |
5077 | ||
5078 | /* Update index too */ | |
4268496e | 5079 | err = ext4_ext_get_access(handle, inode, path + depth); |
9eb79482 NJ |
5080 | if (err) |
5081 | goto out; | |
5082 | ||
331573fe NJ |
5083 | if (SHIFT == SHIFT_LEFT) |
5084 | le32_add_cpu(&path[depth].p_idx->ei_block, -shift); | |
5085 | else | |
5086 | le32_add_cpu(&path[depth].p_idx->ei_block, shift); | |
9eb79482 NJ |
5087 | err = ext4_ext_dirty(handle, inode, path + depth); |
5088 | if (err) | |
5089 | goto out; | |
5090 | ||
5091 | /* we are done if current index is not a starting index */ | |
5092 | if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) | |
5093 | break; | |
5094 | ||
5095 | depth--; | |
5096 | } | |
5097 | ||
5098 | out: | |
5099 | return err; | |
5100 | } | |
5101 | ||
5102 | /* | |
5103 | * ext4_ext_shift_extents: | |
331573fe NJ |
5104 | * All the extents which lie in the range from @start to the last allocated
5105 | * block for the @inode are shifted either to the left or to the right
5106 | * (depending upon @SHIFT) by @shift blocks.
9eb79482 NJ |
5107 | * On success, 0 is returned, error otherwise. |
5108 | */ | |
5109 | static int | |
5110 | ext4_ext_shift_extents(struct inode *inode, handle_t *handle, | |
331573fe NJ |
5111 | ext4_lblk_t start, ext4_lblk_t shift, |
5112 | enum SHIFT_DIRECTION SHIFT) | |
9eb79482 NJ |
5113 | { |
5114 | struct ext4_ext_path *path; | |
5115 | int ret = 0, depth; | |
5116 | struct ext4_extent *extent; | |
331573fe | 5117 | ext4_lblk_t stop, *iterator, ex_start, ex_end; |
1811bc40 | 5118 | ext4_lblk_t tmp = EXT_MAX_BLOCKS; |
9eb79482 NJ |
5119 | |
5120 | /* Let path point to the last extent */ | |
03e916fa RP |
5121 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, |
5122 | EXT4_EX_NOCACHE); | |
9eb79482 NJ |
5123 | if (IS_ERR(path)) |
5124 | return PTR_ERR(path); | |
5125 | ||
5126 | depth = path->p_depth; | |
5127 | extent = path[depth].p_ext; | |
ee4bd0d9 TT |
5128 | if (!extent) |
5129 | goto out; | |
9eb79482 | 5130 | |
2a9b8cba | 5131 | stop = le32_to_cpu(extent->ee_block); |
9eb79482 | 5132 | |
331573fe | 5133 | /* |
349fa7d6 EB |
5134 | * For left shifts, make sure the hole on the left is big enough to |
5135 | * accommodate the shift. For right shifts, make sure the last extent | |
5136 | * won't be shifted beyond EXT_MAX_BLOCKS. | |
331573fe NJ |
5137 | */ |
5138 | if (SHIFT == SHIFT_LEFT) { | |
03e916fa RP |
5139 | path = ext4_find_extent(inode, start - 1, &path, |
5140 | EXT4_EX_NOCACHE); | |
331573fe NJ |
5141 | if (IS_ERR(path)) |
5142 | return PTR_ERR(path); | |
5143 | depth = path->p_depth; | |
5144 | extent = path[depth].p_ext; | |
5145 | if (extent) { | |
5146 | ex_start = le32_to_cpu(extent->ee_block); | |
5147 | ex_end = le32_to_cpu(extent->ee_block) + | |
5148 | ext4_ext_get_actual_len(extent); | |
5149 | } else { | |
5150 | ex_start = 0; | |
5151 | ex_end = 0; | |
5152 | } | |
9eb79482 | 5153 | |
331573fe NJ |
5154 | if ((start == ex_start && shift > ex_start) || |
5155 | (shift > start - ex_end)) { | |
349fa7d6 EB |
5156 | ret = -EINVAL; |
5157 | goto out; | |
5158 | } | |
5159 | } else { | |
5160 | if (shift > EXT_MAX_BLOCKS - | |
5161 | (stop + ext4_ext_get_actual_len(extent))) { | |
5162 | ret = -EINVAL; | |
5163 | goto out; | |
331573fe | 5164 | } |
8dc79ec4 | 5165 | } |
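/*
 * Illustrative example of the left-shift check above (hypothetical
 * numbers): if the extent preceding @start ends at block 100
 * (ex_end == 100) and start == 120, the hole is only 20 blocks wide,
 * so a shift of 30 would slide extents over their neighbour and is
 * rejected with -EINVAL.
 */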
9eb79482 | 5166 | |
331573fe NJ |
5167 | /* |
5168 | * In case of a left shift, the iterator points to start and is increased
5169 | * until we reach stop. In case of a right shift, the iterator points to stop
5170 | * and is decreased until we reach start.
5171 | */ | |
1811bc40 | 5172 | again: |
331573fe NJ |
5173 | if (SHIFT == SHIFT_LEFT) |
5174 | iterator = &start; | |
5175 | else | |
5176 | iterator = &stop; | |
9eb79482 | 5177 | |
1811bc40 | 5178 | if (tmp != EXT_MAX_BLOCKS) |
5179 | *iterator = tmp; | |
5180 | ||
2a9b8cba RP |
5181 | /* |
5182 | * It's safe to start updating extents. Start and stop are unsigned, so
5183 | * in the case of a right shift, if an extent at block 0 is reached, the
5184 | * iterator becomes NULL to indicate the end of the loop.
5185 | */ | |
5186 | while (iterator && start <= stop) { | |
03e916fa RP |
5187 | path = ext4_find_extent(inode, *iterator, &path, |
5188 | EXT4_EX_NOCACHE); | |
9eb79482 NJ |
5189 | if (IS_ERR(path)) |
5190 | return PTR_ERR(path); | |
5191 | depth = path->p_depth; | |
5192 | extent = path[depth].p_ext; | |
a18ed359 DM |
5193 | if (!extent) { |
5194 | EXT4_ERROR_INODE(inode, "unexpected hole at %lu", | |
331573fe | 5195 | (unsigned long) *iterator); |
6a797d27 | 5196 | return -EFSCORRUPTED; |
a18ed359 | 5197 | } |
331573fe NJ |
5198 | if (SHIFT == SHIFT_LEFT && *iterator > |
5199 | le32_to_cpu(extent->ee_block)) { | |
9eb79482 | 5200 | /* Hole, move to the next extent */ |
f8fb4f41 DM |
5201 | if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { |
5202 | path[depth].p_ext++; | |
5203 | } else { | |
331573fe | 5204 | *iterator = ext4_ext_next_allocated_block(path); |
f8fb4f41 | 5205 | continue; |
9eb79482 NJ |
5206 | } |
5207 | } | |
331573fe | 5208 | |
1811bc40 | 5209 | tmp = *iterator; |
331573fe NJ |
5210 | if (SHIFT == SHIFT_LEFT) { |
5211 | extent = EXT_LAST_EXTENT(path[depth].p_hdr); | |
5212 | *iterator = le32_to_cpu(extent->ee_block) + | |
5213 | ext4_ext_get_actual_len(extent); | |
5214 | } else { | |
5215 | extent = EXT_FIRST_EXTENT(path[depth].p_hdr); | |
2a9b8cba RP |
5216 | if (le32_to_cpu(extent->ee_block) > 0) |
5217 | *iterator = le32_to_cpu(extent->ee_block) - 1; | |
5218 | else | |
5219 | /* Beginning is reached, end of the loop */ | |
5220 | iterator = NULL; | |
331573fe NJ |
5221 | /* Update path extent in case we need to stop */ |
5222 | while (le32_to_cpu(extent->ee_block) < start) | |
5223 | extent++; | |
5224 | path[depth].p_ext = extent; | |
5225 | } | |
9eb79482 | 5226 | ret = ext4_ext_shift_path_extents(path, shift, inode, |
331573fe | 5227 | handle, SHIFT); |
1811bc40 | 5228 | /* iterator can be NULL which means we should break */ |
5229 | if (ret == -EAGAIN) | |
5230 | goto again; | |
9eb79482 NJ |
5231 | if (ret) |
5232 | break; | |
5233 | } | |
ee4bd0d9 TT |
5234 | out: |
5235 | ext4_ext_drop_refs(path); | |
5236 | kfree(path); | |
9eb79482 NJ |
5237 | return ret; |
5238 | } | |
5239 | ||
5240 | /* | |
5241 | * ext4_collapse_range: | |
5242 | * This implements fallocate's collapse-range functionality for ext4.
5243 | * Returns: 0 on success and non-zero on error.
5244 | */ | |
43f81677 | 5245 | static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) |
9eb79482 NJ |
5246 | { |
5247 | struct super_block *sb = inode->i_sb; | |
d4f5258e | 5248 | struct address_space *mapping = inode->i_mapping; |
9eb79482 NJ |
5249 | ext4_lblk_t punch_start, punch_stop; |
5250 | handle_t *handle; | |
5251 | unsigned int credits; | |
a8680e0d | 5252 | loff_t new_size, ioffset; |
9eb79482 NJ |
5253 | int ret; |
5254 | ||
b9576fc3 TT |
5255 | /* |
5256 | * We need to test this early because xfstests assumes that a | |
5257 | * collapse range of (0, 1) will return EOPNOTSUPP if the file | |
5258 | * system does not support collapse range. | |
5259 | */ | |
5260 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | |
5261 | return -EOPNOTSUPP; | |
5262 | ||
9b02e498 EB |
5263 | /* Collapse range works only on fs cluster size aligned regions. */ |
5264 | if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) | |
9eb79482 NJ |
5265 | return -EINVAL; |
5266 | ||
9eb79482 NJ |
5267 | trace_ext4_collapse_range(inode, offset, len); |
5268 | ||
5269 | punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); | |
5270 | punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); | |
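/*
 * Illustrative example (hypothetical, 4 KiB blocks): offset == 1 MiB
 * and len == 512 KiB give punch_start == 256 and punch_stop == 384;
 * blocks 256..383 are removed and everything from block 384 onward is
 * later shifted left by punch_stop - punch_start == 128 blocks.
 */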
5271 | ||
1ce01c4a NJ |
5272 | /* Call ext4_force_commit to flush all data in case of data=journal. */ |
5273 | if (ext4_should_journal_data(inode)) { | |
5274 | ret = ext4_force_commit(inode->i_sb); | |
5275 | if (ret) | |
5276 | return ret; | |
5277 | } | |
5278 | ||
5955102c | 5279 | inode_lock(inode); |
23fffa92 LC |
5280 | /* |
5281 | * There is no need for a collapse range to overlap EOF, since in that
5282 | * case it is effectively a truncate operation
5283 | */ | |
9b02e498 | 5284 | if (offset + len >= inode->i_size) { |
23fffa92 LC |
5285 | ret = -EINVAL; |
5286 | goto out_mutex; | |
5287 | } | |
5288 | ||
9eb79482 NJ |
5289 | /* Currently just for extent based files */ |
5290 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { | |
5291 | ret = -EOPNOTSUPP; | |
5292 | goto out_mutex; | |
5293 | } | |
5294 | ||
9eb79482 | 5295 | /* Wait for existing dio to complete */ |
9eb79482 NJ |
5296 | inode_dio_wait(inode); |
5297 | ||
ea3d7209 JK |
5298 | /* |
5299 | * Prevent page faults from reinstantiating pages we have released from | |
5300 | * page cache. | |
5301 | */ | |
d4f5258e | 5302 | filemap_invalidate_lock(mapping); |
430657b6 RZ |
5303 | |
5304 | ret = ext4_break_layouts(inode); | |
5305 | if (ret) | |
5306 | goto out_mmap; | |
5307 | ||
32ebffd3 JK |
5308 | /* |
5309 | * Need to round down offset to be aligned with page size boundary | |
5310 | * for page size > block size. | |
5311 | */ | |
5312 | ioffset = round_down(offset, PAGE_SIZE); | |
5313 | /* | |
5314 | * Write tail of the last page before removed range since it will get | |
5315 | * removed from the page cache below. | |
5316 | */ | |
d4f5258e | 5317 | ret = filemap_write_and_wait_range(mapping, ioffset, offset); |
32ebffd3 JK |
5318 | if (ret) |
5319 | goto out_mmap; | |
5320 | /* | |
5321 | * Write data that will be shifted to preserve them when discarding | |
5322 | * page cache below. We are also protected from pages becoming dirty | |
d4f5258e | 5323 | * by i_rwsem and invalidate_lock. |
32ebffd3 | 5324 | */ |
d4f5258e | 5325 | ret = filemap_write_and_wait_range(mapping, offset + len, |
32ebffd3 JK |
5326 | LLONG_MAX); |
5327 | if (ret) | |
5328 | goto out_mmap; | |
ea3d7209 JK |
5329 | truncate_pagecache(inode, ioffset); |
5330 | ||
9eb79482 NJ |
5331 | credits = ext4_writepage_trans_blocks(inode); |
5332 | handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); | |
5333 | if (IS_ERR(handle)) { | |
5334 | ret = PTR_ERR(handle); | |
ea3d7209 | 5335 | goto out_mmap; |
9eb79482 | 5336 | } |
7bbbe241 | 5337 | ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); |
9eb79482 NJ |
5338 | |
5339 | down_write(&EXT4_I(inode)->i_data_sem); | |
27bc446e | 5340 | ext4_discard_preallocations(inode, 0); |
9eb79482 NJ |
5341 | |
5342 | ret = ext4_es_remove_extent(inode, punch_start, | |
2c1d2328 | 5343 | EXT_MAX_BLOCKS - punch_start); |
9eb79482 NJ |
5344 | if (ret) { |
5345 | up_write(&EXT4_I(inode)->i_data_sem); | |
5346 | goto out_stop; | |
5347 | } | |
5348 | ||
5349 | ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); | |
5350 | if (ret) { | |
5351 | up_write(&EXT4_I(inode)->i_data_sem); | |
5352 | goto out_stop; | |
5353 | } | |
27bc446e | 5354 | ext4_discard_preallocations(inode, 0); |
9eb79482 NJ |
5355 | |
5356 | ret = ext4_ext_shift_extents(inode, handle, punch_stop, | |
331573fe | 5357 | punch_stop - punch_start, SHIFT_LEFT); |
9eb79482 NJ |
5358 | if (ret) { |
5359 | up_write(&EXT4_I(inode)->i_data_sem); | |
5360 | goto out_stop; | |
5361 | } | |
5362 | ||
9b02e498 | 5363 | new_size = inode->i_size - len; |
9337d5d3 | 5364 | i_size_write(inode, new_size); |
9eb79482 NJ |
5365 | EXT4_I(inode)->i_disksize = new_size; |
5366 | ||
9eb79482 NJ |
5367 | up_write(&EXT4_I(inode)->i_data_sem); |
5368 | if (IS_SYNC(inode)) | |
5369 | ext4_handle_sync(handle); | |
eeca7ea1 | 5370 | inode->i_mtime = inode->i_ctime = current_time(inode); |
4209ae12 | 5371 | ret = ext4_mark_inode_dirty(handle, inode); |
67a7d5f5 | 5372 | ext4_update_inode_fsync_trans(handle, inode, 1); |
9eb79482 NJ |
5373 | |
5374 | out_stop: | |
5375 | ext4_journal_stop(handle); | |
ea3d7209 | 5376 | out_mmap: |
d4f5258e | 5377 | filemap_invalidate_unlock(mapping); |
9eb79482 | 5378 | out_mutex: |
5955102c | 5379 | inode_unlock(inode); |
9eb79482 NJ |
5380 | return ret; |
5381 | } | |
fcf6b1b7 | 5382 | |
331573fe NJ |
5383 | /* |
5384 | * ext4_insert_range: | |
5385 | * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. | |
5386 | * The data blocks starting from @offset up to EOF are shifted by @len
5387 | * to the right to create a hole in the @inode. The inode size is increased
5388 | * by @len bytes.
5389 | * Returns 0 on success, error otherwise. | |
5390 | */ | |
43f81677 | 5391 | static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) |
331573fe NJ |
5392 | { |
5393 | struct super_block *sb = inode->i_sb; | |
d4f5258e | 5394 | struct address_space *mapping = inode->i_mapping; |
331573fe NJ |
5395 | handle_t *handle; |
5396 | struct ext4_ext_path *path; | |
5397 | struct ext4_extent *extent; | |
5398 | ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; | |
5399 | unsigned int credits, ee_len; | |
5400 | int ret = 0, depth, split_flag = 0; | |
5401 | loff_t ioffset; | |
5402 | ||
5403 | /* | |
5404 | * We need to test this early because xfstests assumes that an | |
5405 | * insert range of (0, 1) will return EOPNOTSUPP if the file | |
5406 | * system does not support insert range. | |
5407 | */ | |
5408 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | |
5409 | return -EOPNOTSUPP; | |
5410 | ||
9b02e498 EB |
5411 | /* Insert range works only on fs cluster size aligned regions. */ |
5412 | if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) | |
331573fe NJ |
5413 | return -EINVAL; |
5414 | ||
331573fe NJ |
5415 | trace_ext4_insert_range(inode, offset, len); |
5416 | ||
5417 | offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); | |
5418 | len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); | |
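/*
 * Illustrative example (hypothetical, 4 KiB blocks): offset == 1 MiB
 * and len == 512 KiB give offset_lblk == 256 and len_lblk == 128;
 * extents from block 256 onward are shifted right by 128 blocks and
 * i_size grows by len bytes.
 */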
5419 | ||
5420 | /* Call ext4_force_commit to flush all data in case of data=journal */ | |
5421 | if (ext4_should_journal_data(inode)) { | |
5422 | ret = ext4_force_commit(inode->i_sb); | |
5423 | if (ret) | |
5424 | return ret; | |
5425 | } | |
5426 | ||
5955102c | 5427 | inode_lock(inode); |
331573fe NJ |
5428 | /* Currently just for extent based files */ |
5429 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { | |
5430 | ret = -EOPNOTSUPP; | |
5431 | goto out_mutex; | |
5432 | } | |
5433 | ||
9b02e498 EB |
5434 | /* Check whether the maximum file size would be exceeded */ |
5435 | if (len > inode->i_sb->s_maxbytes - inode->i_size) { | |
331573fe NJ |
5436 | ret = -EFBIG; |
5437 | goto out_mutex; | |
5438 | } | |
5439 | ||
9b02e498 EB |
5440 | /* Offset must be less than i_size */ |
5441 | if (offset >= inode->i_size) { | |
331573fe NJ |
5442 | ret = -EINVAL; |
5443 | goto out_mutex; | |
5444 | } | |
5445 | ||
331573fe | 5446 | /* Wait for existing dio to complete */ |
331573fe NJ |
5447 | inode_dio_wait(inode); |
5448 | ||
ea3d7209 JK |
5449 | /* |
5450 | * Prevent page faults from reinstantiating pages we have released from | |
5451 | * page cache. | |
5452 | */ | |
d4f5258e | 5453 | filemap_invalidate_lock(mapping); |
430657b6 RZ |
5454 | |
5455 | ret = ext4_break_layouts(inode); | |
5456 | if (ret) | |
5457 | goto out_mmap; | |
5458 | ||
32ebffd3 JK |
5459 | /* |
5460 | * Need to round down to align start offset to page size boundary | |
5461 | * for page size > block size. | |
5462 | */ | |
5463 | ioffset = round_down(offset, PAGE_SIZE); | |
5464 | /* Write out all dirty pages */ | |
5465 | ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, | |
5466 | LLONG_MAX); | |
5467 | if (ret) | |
5468 | goto out_mmap; | |
ea3d7209 JK |
5469 | truncate_pagecache(inode, ioffset); |
5470 | ||
331573fe NJ |
5471 | credits = ext4_writepage_trans_blocks(inode); |
5472 | handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); | |
5473 | if (IS_ERR(handle)) { | |
5474 | ret = PTR_ERR(handle); | |
ea3d7209 | 5475 | goto out_mmap; |
331573fe | 5476 | } |
7bbbe241 | 5477 | ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); |
331573fe NJ |
5478 | |
5479 | /* Expand file to avoid data loss if there is error while shifting */ | |
5480 | inode->i_size += len; | |
5481 | EXT4_I(inode)->i_disksize += len; | |
eeca7ea1 | 5482 | inode->i_mtime = inode->i_ctime = current_time(inode); |
331573fe NJ |
5483 | ret = ext4_mark_inode_dirty(handle, inode); |
5484 | if (ret) | |
5485 | goto out_stop; | |
5486 | ||
5487 | down_write(&EXT4_I(inode)->i_data_sem); | |
27bc446e | 5488 | ext4_discard_preallocations(inode, 0); |
331573fe NJ |
5489 | |
5490 | path = ext4_find_extent(inode, offset_lblk, NULL, 0); | |
5491 | if (IS_ERR(path)) { | |
5492 | up_write(&EXT4_I(inode)->i_data_sem); | |
5493 | goto out_stop; | |
5494 | } | |
5495 | ||
5496 | depth = ext_depth(inode); | |
5497 | extent = path[depth].p_ext; | |
5498 | if (extent) { | |
5499 | ee_start_lblk = le32_to_cpu(extent->ee_block); | |
5500 | ee_len = ext4_ext_get_actual_len(extent); | |
5501 | ||
5502 | /* | |
5503 | * If offset_lblk is not the starting block of extent, split | |
5504 | * the extent @offset_lblk | |
5505 | */ | |
5506 | if ((offset_lblk > ee_start_lblk) && | |
5507 | (offset_lblk < (ee_start_lblk + ee_len))) { | |
5508 | if (ext4_ext_is_unwritten(extent)) | |
5509 | split_flag = EXT4_EXT_MARK_UNWRIT1 | | |
5510 | EXT4_EXT_MARK_UNWRIT2; | |
5511 | ret = ext4_split_extent_at(handle, inode, &path, | |
5512 | offset_lblk, split_flag, | |
5513 | EXT4_EX_NOCACHE | | |
5514 | EXT4_GET_BLOCKS_PRE_IO | | |
5515 | EXT4_GET_BLOCKS_METADATA_NOFAIL); | |
5516 | } | |
5517 | ||
5518 | ext4_ext_drop_refs(path); | |
5519 | kfree(path); | |
5520 | if (ret < 0) { | |
5521 | up_write(&EXT4_I(inode)->i_data_sem); | |
5522 | goto out_stop; | |
5523 | } | |
edf15aa1 FF |
5524 | } else { |
5525 | ext4_ext_drop_refs(path); | |
5526 | kfree(path); | |
331573fe NJ |
5527 | } |
5528 | ||
5529 | ret = ext4_es_remove_extent(inode, offset_lblk, | |
5530 | EXT_MAX_BLOCKS - offset_lblk); | |
5531 | if (ret) { | |
5532 | up_write(&EXT4_I(inode)->i_data_sem); | |
5533 | goto out_stop; | |
5534 | } | |
5535 | ||
5536 | /* | |
5537 | * if offset_lblk lies in a hole which is at start of file, use | |
5538 | * ee_start_lblk to shift extents | |
5539 | */ | |
5540 | ret = ext4_ext_shift_extents(inode, handle, | |
5541 | ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk, | |
5542 | len_lblk, SHIFT_RIGHT); | |
5543 | ||
5544 | up_write(&EXT4_I(inode)->i_data_sem); | |
5545 | if (IS_SYNC(inode)) | |
5546 | ext4_handle_sync(handle); | |
67a7d5f5 JK |
5547 | if (ret >= 0) |
5548 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
331573fe NJ |
5549 | |
5550 | out_stop: | |
5551 | ext4_journal_stop(handle); | |
ea3d7209 | 5552 | out_mmap: |
d4f5258e | 5553 | filemap_invalidate_unlock(mapping); |
331573fe | 5554 | out_mutex: |
5955102c | 5555 | inode_unlock(inode); |
331573fe NJ |
5556 | return ret; |
5557 | } | |
5558 | ||
fcf6b1b7 | 5559 | /** |
c60990b3 TT |
5560 | * ext4_swap_extents() - Swap extents between two inodes |
5561 | * @handle: handle for this transaction | |
fcf6b1b7 DM |
5562 | * @inode1: First inode |
5563 | * @inode2: Second inode | |
5564 | * @lblk1: Start block for first inode | |
5565 | * @lblk2: Start block for second inode | |
5566 | * @count: Number of blocks to swap | |
dcae058a | 5567 | * @unwritten: Mark second inode's extents as unwritten after swap |
fcf6b1b7 DM |
5568 | * @erp: Pointer to save error value |
5569 | * | |
5570 | * This helper routine does exactly what it promises: "swap extents". All other
5571 | * concerns, such as page-cache locking consistency, bh mapping consistency, or
5572 | * copying of extent data, must be handled by the caller.
5573 | * Locking: | |
5574 | * i_mutex is held for both inodes | |
5575 | * i_data_sem is locked for write for both inodes | |
5576 | * Assumptions: | |
5577 | * All pages from requested range are locked for both inodes | |
5578 | */ | |
5579 | int | |
5580 | ext4_swap_extents(handle_t *handle, struct inode *inode1, | |
dcae058a | 5581 | struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, |
fcf6b1b7 DM |
5582 | ext4_lblk_t count, int unwritten, int *erp) |
5583 | { | |
5584 | struct ext4_ext_path *path1 = NULL; | |
5585 | struct ext4_ext_path *path2 = NULL; | |
5586 | int replaced_count = 0; | |
5587 | ||
5588 | BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); | |
5589 | BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); | |
5955102c AV |
5590 | BUG_ON(!inode_is_locked(inode1)); |
5591 | BUG_ON(!inode_is_locked(inode2)); | |
fcf6b1b7 DM |
5592 | |
5593 | *erp = ext4_es_remove_extent(inode1, lblk1, count); | |
19008f6d | 5594 | if (unlikely(*erp)) |
fcf6b1b7 DM |
5595 | return 0; |
5596 | *erp = ext4_es_remove_extent(inode2, lblk2, count); | |
19008f6d | 5597 | if (unlikely(*erp)) |
fcf6b1b7 DM |
5598 | return 0; |
5599 | ||
5600 | while (count) { | |
5601 | struct ext4_extent *ex1, *ex2, tmp_ex; | |
5602 | ext4_lblk_t e1_blk, e2_blk; | |
5603 | int e1_len, e2_len, len; | |
5604 | int split = 0; | |
5605 | ||
ed8a1a76 | 5606 | path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); |
a1c83681 | 5607 | if (IS_ERR(path1)) { |
fcf6b1b7 | 5608 | *erp = PTR_ERR(path1); |
19008f6d TT |
5609 | path1 = NULL; |
5610 | finish: | |
5611 | count = 0; | |
5612 | goto repeat; | |
fcf6b1b7 | 5613 | } |
ed8a1a76 | 5614 | path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); |
a1c83681 | 5615 | if (IS_ERR(path2)) { |
fcf6b1b7 | 5616 | *erp = PTR_ERR(path2); |
19008f6d TT |
5617 | path2 = NULL; |
5618 | goto finish; | |
fcf6b1b7 DM |
5619 | } |
5620 | ex1 = path1[path1->p_depth].p_ext; | |
5621 | ex2 = path2[path2->p_depth].p_ext; | |
e4d7f2d3 | 5622 | /* Do we have something to swap? */ |
fcf6b1b7 | 5623 | if (unlikely(!ex2 || !ex1)) |
19008f6d | 5624 | goto finish; |
fcf6b1b7 DM |
5625 | |
5626 | e1_blk = le32_to_cpu(ex1->ee_block); | |
5627 | e2_blk = le32_to_cpu(ex2->ee_block); | |
5628 | e1_len = ext4_ext_get_actual_len(ex1); | |
5629 | e2_len = ext4_ext_get_actual_len(ex2); | |
5630 | ||
5631 | /* Hole handling */ | |
5632 | if (!in_range(lblk1, e1_blk, e1_len) || | |
5633 | !in_range(lblk2, e2_blk, e2_len)) { | |
5634 | ext4_lblk_t next1, next2; | |
5635 | ||
5636 | /* if hole after extent, then go to next extent */ | |
5637 | next1 = ext4_ext_next_allocated_block(path1); | |
5638 | next2 = ext4_ext_next_allocated_block(path2); | |
5639 | /* If hole before extent, then shift to that extent */ | |
5640 | if (e1_blk > lblk1) | |
5641 | next1 = e1_blk; | |
5642 | if (e2_blk > lblk2) | |
4e562013 | 5643 | next2 = e2_blk; |
fcf6b1b7 DM |
5644 | /* Do we have something to swap? */ | |
5645 | if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS) | |
19008f6d | 5646 | goto finish; |
fcf6b1b7 DM |
5647 | /* Move to the rightmost boundary */ | |
5648 | len = next1 - lblk1; | |
5649 | if (len < next2 - lblk2) | |
5650 | len = next2 - lblk2; | |
5651 | if (len > count) | |
5652 | len = count; | |
5653 | lblk1 += len; | |
5654 | lblk2 += len; | |
5655 | count -= len; | |
5656 | goto repeat; | |
5657 | } | |
5658 | ||
5659 | /* Prepare left boundary */ | |
5660 | if (e1_blk < lblk1) { | |
5661 | split = 1; | |
5662 | *erp = ext4_force_split_extent_at(handle, inode1, | |
dfe50809 | 5663 | &path1, lblk1, 0); |
19008f6d TT |
5664 | if (unlikely(*erp)) |
5665 | goto finish; | |
fcf6b1b7 DM |
5666 | } |
5667 | if (e2_blk < lblk2) { | |
5668 | split = 1; | |
5669 | *erp = ext4_force_split_extent_at(handle, inode2, | |
dfe50809 | 5670 | &path2, lblk2, 0); |
19008f6d TT |
5671 | if (unlikely(*erp)) |
5672 | goto finish; | |
fcf6b1b7 | 5673 | } |
dfe50809 | 5674 | /* ext4_split_extent_at() may result in a leaf extent split, |
fcf6b1b7 DM |
5675 | * path must be revalidated. */ | |
5676 | if (split) | |
5677 | goto repeat; | |
5678 | ||
5679 | /* Prepare right boundary */ | |
5680 | len = count; | |
5681 | if (len > e1_blk + e1_len - lblk1) | |
5682 | len = e1_blk + e1_len - lblk1; | |
5683 | if (len > e2_blk + e2_len - lblk2) | |
5684 | len = e2_blk + e2_len - lblk2; | |
5685 | ||
5686 | if (len != e1_len) { | |
5687 | split = 1; | |
5688 | *erp = ext4_force_split_extent_at(handle, inode1, | |
dfe50809 | 5689 | &path1, lblk1 + len, 0); |
19008f6d TT |
5690 | if (unlikely(*erp)) |
5691 | goto finish; | |
fcf6b1b7 DM |
5692 | } |
5693 | if (len != e2_len) { | |
5694 | split = 1; | |
5695 | *erp = ext4_force_split_extent_at(handle, inode2, | |
dfe50809 | 5696 | &path2, lblk2 + len, 0); |
fcf6b1b7 | 5697 | if (*erp) |
19008f6d | 5698 | goto finish; |
fcf6b1b7 | 5699 | } |
dfe50809 | 5700 | /* ext4_split_extent_at() may result in a leaf extent split, |
fcf6b1b7 DM |
5701 | * path must be revalidated. */ | |
5702 | if (split) | |
5703 | goto repeat; | |
5704 | ||
5705 | BUG_ON(e2_len != e1_len); | |
5706 | *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth); | |
19008f6d TT |
5707 | if (unlikely(*erp)) |
5708 | goto finish; | |
fcf6b1b7 | 5709 | *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth); |
19008f6d TT |
5710 | if (unlikely(*erp)) |
5711 | goto finish; | |
fcf6b1b7 DM |
5712 | |
5713 | /* Both extents are fully inside the boundaries. Swap them now */ | |
5714 | tmp_ex = *ex1; | |
5715 | ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2)); | |
5716 | ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex)); | |
5717 | ex1->ee_len = cpu_to_le16(e2_len); | |
5718 | ex2->ee_len = cpu_to_le16(e1_len); | |
5719 | if (unwritten) | |
5720 | ext4_ext_mark_unwritten(ex2); | |
5721 | if (ext4_ext_is_unwritten(&tmp_ex)) | |
5722 | ext4_ext_mark_unwritten(ex1); | |
5723 | ||
5724 | ext4_ext_try_to_merge(handle, inode2, path2, ex2); | |
5725 | ext4_ext_try_to_merge(handle, inode1, path1, ex1); | |
5726 | *erp = ext4_ext_dirty(handle, inode2, path2 + | |
5727 | path2->p_depth); | |
19008f6d TT |
5728 | if (unlikely(*erp)) |
5729 | goto finish; | |
fcf6b1b7 DM |
5730 | *erp = ext4_ext_dirty(handle, inode1, path1 + |
5731 | path1->p_depth); | |
5732 | /* | |
5733 | * Looks scary, huh? The second inode already points to the new | |
5734 | * blocks, and it was successfully dirtied. But luckily an error | |
5735 | * here can only be caused by a journal error, so the full | |
5736 | * transaction will be aborted anyway. | |
5737 | */ | |
19008f6d TT |
5738 | if (unlikely(*erp)) |
5739 | goto finish; | |
fcf6b1b7 DM |
5740 | lblk1 += len; |
5741 | lblk2 += len; | |
5742 | replaced_count += len; | |
5743 | count -= len; | |
5744 | ||
5745 | repeat: | |
b7ea89ad TT |
5746 | ext4_ext_drop_refs(path1); |
5747 | kfree(path1); | |
5748 | ext4_ext_drop_refs(path2); | |
5749 | kfree(path2); | |
5750 | path1 = path2 = NULL; | |
fcf6b1b7 | 5751 | } |
fcf6b1b7 DM |
5752 | return replaced_count; |
5753 | } | |
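
/*
 * Editor's sketch, not part of the kernel source: the swap loop above
 * rewrites ee_len, the physical block, and the unwritten bit in place.
 * For reference, the 12-byte on-disk extent record and the two decoding
 * helpers the loop relies on, mirroring the kernel's struct ext4_extent,
 * ext4_ext_pblock(), and ext4_ext_get_actual_len(); the struct and helper
 * names here are local, and endian conversion is elided under a
 * little-endian host assumption.
 */
#include <stdint.h>

#define EXT_INIT_MAX_LEN 32768U		/* 1 << 15 */

struct disk_extent {
	uint32_t ee_block;	/* first logical block the extent covers */
	uint16_t ee_len;	/* > EXT_INIT_MAX_LEN marks it unwritten */
	uint16_t ee_start_hi;	/* high 16 bits of the physical block */
	uint32_t ee_start_lo;	/* low 32 bits of the physical block */
};

static inline uint64_t disk_ext_pblock(const struct disk_extent *ex)
{
	return ((uint64_t)ex->ee_start_hi << 32) | ex->ee_start_lo;
}

static inline unsigned int disk_ext_actual_len(const struct disk_extent *ex)
{
	/* an unwritten extent stores its length biased by EXT_INIT_MAX_LEN */
	return ex->ee_len <= EXT_INIT_MAX_LEN ?
		ex->ee_len : ex->ee_len - EXT_INIT_MAX_LEN;
}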
0b02f4c0 EW |
5754 | |
5755 | /* | |
5756 | * ext4_clu_mapped - determine whether any block in a logical cluster has | |
5757 | * been mapped to a physical cluster | |
5758 | * | |
5759 | * @inode - file containing the logical cluster | |
5760 | * @lclu - logical cluster of interest | |
5761 | * | |
5762 | * Returns 1 if any block in the logical cluster is mapped, signifying | |
5763 | * that a physical cluster has been allocated for it. Otherwise, | |
5764 | * returns 0. Can also return negative error codes. Derived from | |
5765 | * ext4_ext_map_blocks(). | |
5766 | */ | |
5767 | int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu) | |
5768 | { | |
5769 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | |
5770 | struct ext4_ext_path *path; | |
5771 | int depth, mapped = 0, err = 0; | |
5772 | struct ext4_extent *extent; | |
5773 | ext4_lblk_t first_lblk, first_lclu, last_lclu; | |
5774 | ||
5775 | /* search for the extent closest to the first block in the cluster */ | |
5776 | path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0); | |
5777 | if (IS_ERR(path)) { | |
5778 | err = PTR_ERR(path); | |
5779 | path = NULL; | |
5780 | goto out; | |
5781 | } | |
5782 | ||
5783 | depth = ext_depth(inode); | |
5784 | ||
5785 | /* | |
5786 | * A consistent leaf must not be empty. This situation is possible, | |
5787 | * though, _during_ tree modification, and it's why an assert can't | |
5788 | * be put in ext4_find_extent(). | |
5789 | */ | |
5790 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { | |
5791 | EXT4_ERROR_INODE(inode, | |
5792 | "bad extent address - lblock: %lu, depth: %d, pblock: %lld", | |
5793 | (unsigned long) EXT4_C2B(sbi, lclu), | |
5794 | depth, path[depth].p_block); | |
5795 | err = -EFSCORRUPTED; | |
5796 | goto out; | |
5797 | } | |
5798 | ||
5799 | extent = path[depth].p_ext; | |
5800 | ||
5801 | /* can't be mapped if the extent tree is empty */ | |
5802 | if (extent == NULL) | |
5803 | goto out; | |
5804 | ||
5805 | first_lblk = le32_to_cpu(extent->ee_block); | |
5806 | first_lclu = EXT4_B2C(sbi, first_lblk); | |
5807 | ||
5808 | /* | |
5809 | * Three possible outcomes at this point - found extent spanning | |
5810 | * the target cluster, to the left of the target cluster, or to the | |
5811 | * right of the target cluster. The first two cases are handled here. | |
5812 | * The last case indicates the target cluster is not mapped. | |
5813 | */ | |
5814 | if (lclu >= first_lclu) { | |
5815 | last_lclu = EXT4_B2C(sbi, first_lblk + | |
5816 | ext4_ext_get_actual_len(extent) - 1); | |
5817 | if (lclu <= last_lclu) { | |
5818 | mapped = 1; | |
5819 | } else { | |
5820 | first_lblk = ext4_ext_next_allocated_block(path); | |
5821 | first_lclu = EXT4_B2C(sbi, first_lblk); | |
5822 | if (lclu == first_lclu) | |
5823 | mapped = 1; | |
5824 | } | |
5825 | } | |
5826 | ||
5827 | out: | |
5828 | ext4_ext_drop_refs(path); | |
5829 | kfree(path); | |
5830 | ||
5831 | return err ? err : mapped; | |
5832 | } | |
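
/*
 * Editor's sketch, not part of the kernel source: ext4_clu_mapped() leans
 * entirely on the EXT4_B2C()/EXT4_C2B() shift macros, which convert between
 * blocks and bigalloc clusters of 2^cluster_bits blocks. A standalone
 * sketch of that arithmetic; the helper names, cluster ratio, and extent
 * range are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t b2c(uint32_t lblk, unsigned int cluster_bits)
{
	return lblk >> cluster_bits;	/* logical block -> logical cluster */
}

static uint32_t c2b(uint32_t lclu, unsigned int cluster_bits)
{
	return lclu << cluster_bits;	/* first block of a logical cluster */
}

int main(void)
{
	unsigned int cluster_bits = 4;	/* e.g. 64K clusters of 4K blocks */

	/* an extent covering blocks [100, 131] spans clusters 6 through 8 */
	printf("first lclu %u, last lclu %u\n",
	       b2c(100, cluster_bits), b2c(131, cluster_bits));
	printf("cluster 6 starts at block %u\n", c2b(6, cluster_bits));
	return 0;
}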
8016e29f HS |
5833 | |
5834 | /* | |
5835 | * Updates the physical block address and unwritten status of the extent | |
5836 | * starting at logical block 'start' and of length 'len'. If such an extent | |
5837 | * doesn't exist, this function splits the extent tree appropriately to | |
5838 | * create one. This function is called in the fast commit | |
5839 | * replay path. Returns 0 on success and an error on failure. | |
5840 | */ | |
5841 | int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start, | |
5842 | int len, int unwritten, ext4_fsblk_t pblk) | |
5843 | { | |
5844 | struct ext4_ext_path *path = NULL, *ppath; | |
5845 | struct ext4_extent *ex; | |
5846 | int ret; | |
5847 | ||
5848 | path = ext4_find_extent(inode, start, NULL, 0); | |
bc18546b DC |
5849 | if (IS_ERR(path)) |
5850 | return PTR_ERR(path); | |
8016e29f HS |
5851 | ex = path[path->p_depth].p_ext; |
5852 | if (!ex) { | |
5853 | ret = -EFSCORRUPTED; | |
5854 | goto out; | |
5855 | } | |
5856 | ||
5857 | if (le32_to_cpu(ex->ee_block) != start || | |
5858 | ext4_ext_get_actual_len(ex) != len) { | |
5859 | /* We need to split this extent to match our extent first */ | |
5860 | ppath = path; | |
5861 | down_write(&EXT4_I(inode)->i_data_sem); | |
5862 | ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1); | |
5863 | up_write(&EXT4_I(inode)->i_data_sem); | |
5864 | if (ret) | |
5865 | goto out; | |
5866 | kfree(path); | |
5867 | path = ext4_find_extent(inode, start, NULL, 0); | |
5868 | if (IS_ERR(path)) | |
5869 | return PTR_ERR(path); | |
5870 | ppath = path; | |
5871 | ex = path[path->p_depth].p_ext; | |
5872 | WARN_ON(le32_to_cpu(ex->ee_block) != start); | |
5873 | if (ext4_ext_get_actual_len(ex) != len) { | |
5874 | down_write(&EXT4_I(inode)->i_data_sem); | |
5875 | ret = ext4_force_split_extent_at(NULL, inode, &ppath, | |
5876 | start + len, 1); | |
5877 | up_write(&EXT4_I(inode)->i_data_sem); | |
5878 | if (ret) | |
5879 | goto out; | |
5880 | kfree(path); | |
5881 | path = ext4_find_extent(inode, start, NULL, 0); | |
5882 | if (IS_ERR(path)) | |
5883 | return PTR_ERR(path); | |
5884 | ex = path[path->p_depth].p_ext; | |
5885 | } | |
5886 | } | |
5887 | if (unwritten) | |
5888 | ext4_ext_mark_unwritten(ex); | |
5889 | else | |
5890 | ext4_ext_mark_initialized(ex); | |
5891 | ext4_ext_store_pblock(ex, pblk); | |
5892 | down_write(&EXT4_I(inode)->i_data_sem); | |
5893 | ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]); | |
5894 | up_write(&EXT4_I(inode)->i_data_sem); | |
5895 | out: | |
5896 | ext4_ext_drop_refs(path); | |
5897 | kfree(path); | |
5898 | ext4_mark_inode_dirty(NULL, inode); | |
5899 | return ret; | |
5900 | } | |
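
/*
 * Editor's sketch, not part of the kernel source: the replay logic above
 * carves out an extent with exactly the target boundaries by splitting at
 * 'start' and, after revalidating the path, at 'start + len'. The same
 * decision reduced to pure arithmetic; the helper name is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static void replay_split_points(uint32_t ee_block, uint32_t ee_len,
				uint32_t start, uint32_t len)
{
	/* assumes the extent [ee_block, ee_block + ee_len) contains start */
	if (ee_block != start)
		printf("split at %u to align the left boundary\n", start);
	if (ee_block + ee_len > start + len)
		printf("split at %u to align the right boundary\n",
		       start + len);
}

int main(void)
{
	/* extent [100, 150) must yield an exact [110, 120) after replay */
	replay_split_points(100, 50, 110, 10);
	return 0;
}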
5901 | ||
5902 | /* Try to shrink the extent tree */ | |
5903 | void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end) | |
5904 | { | |
5905 | struct ext4_ext_path *path = NULL; | |
5906 | struct ext4_extent *ex; | |
5907 | ext4_lblk_t old_cur, cur = 0; | |
5908 | ||
5909 | while (cur < end) { | |
5910 | path = ext4_find_extent(inode, cur, NULL, 0); | |
5911 | if (IS_ERR(path)) | |
5912 | return; | |
5913 | ex = path[path->p_depth].p_ext; | |
5914 | if (!ex) { | |
5915 | ext4_ext_drop_refs(path); | |
5916 | kfree(path); | |
5917 | ext4_mark_inode_dirty(NULL, inode); | |
5918 | return; | |
5919 | } | |
5920 | old_cur = cur; | |
5921 | cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); | |
5922 | if (cur <= old_cur) | |
5923 | cur = old_cur + 1; | |
5924 | ext4_ext_try_to_merge(NULL, inode, path, ex); | |
5925 | down_write(&EXT4_I(inode)->i_data_sem); | |
5926 | ext4_ext_dirty(NULL, inode, &path[path->p_depth]); | |
5927 | up_write(&EXT4_I(inode)->i_data_sem); | |
5928 | ext4_mark_inode_dirty(NULL, inode); | |
5929 | ext4_ext_drop_refs(path); | |
5930 | kfree(path); | |
5931 | } | |
5932 | } | |
5933 | ||
5934 | /* Check if *cur is a hole and if it is, skip it */ | |
1fd95c05 | 5935 | static int skip_hole(struct inode *inode, ext4_lblk_t *cur) |
8016e29f HS |
5936 | { |
5937 | int ret; | |
5938 | struct ext4_map_blocks map; | |
5939 | ||
5940 | map.m_lblk = *cur; | |
5941 | map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur; | |
5942 | ||
5943 | ret = ext4_map_blocks(NULL, inode, &map, 0); | |
1fd95c05 TT |
5944 | if (ret < 0) |
5945 | return ret; | |
8016e29f | 5946 | if (ret != 0) |
1fd95c05 | 5947 | return 0; |
8016e29f | 5948 | *cur = *cur + map.m_len; |
1fd95c05 | 5949 | return 0; |
8016e29f HS |
5950 | } |
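
/*
 * Editor's sketch, not part of the kernel source: skip_hole() above probes
 * with ext4_map_blocks() and advances *cur past an unmapped region. From
 * userspace the analogous question is asked with lseek(2)'s SEEK_DATA; a
 * minimal sketch, where the file name is an illustrative assumption and
 * ENXIO means only holes remain past the offset.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;
	off_t data;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* find the first byte of data at or after offset 0, skipping holes */
	data = lseek(fd, 0, SEEK_DATA);
	if (data < 0)
		perror("lseek(SEEK_DATA)");
	else
		printf("first data byte at %lld\n", (long long)data);
	close(fd);
	return 0;
}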
5951 | ||
5952 | /* Count number of blocks used by this inode and update i_blocks */ | |
5953 | int ext4_ext_replay_set_iblocks(struct inode *inode) | |
5954 | { | |
5955 | struct ext4_ext_path *path = NULL, *path2 = NULL; | |
5956 | struct ext4_extent *ex; | |
5957 | ext4_lblk_t cur = 0, end; | |
5958 | int numblks = 0, i, ret = 0; | |
5959 | ext4_fsblk_t cmp1, cmp2; | |
5960 | struct ext4_map_blocks map; | |
5961 | ||
5962 | /* Determine the size of the file first */ | |
5963 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, | |
5964 | EXT4_EX_NOCACHE); | |
5965 | if (IS_ERR(path)) | |
5966 | return PTR_ERR(path); | |
5967 | ex = path[path->p_depth].p_ext; | |
5968 | if (!ex) { | |
5969 | ext4_ext_drop_refs(path); | |
5970 | kfree(path); | |
5971 | goto out; | |
5972 | } | |
5973 | end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); | |
5974 | ext4_ext_drop_refs(path); | |
5975 | kfree(path); | |
5976 | ||
5977 | /* Count the number of data blocks */ | |
5978 | cur = 0; | |
5979 | while (cur < end) { | |
5980 | map.m_lblk = cur; | |
5981 | map.m_len = end - cur; | |
5982 | ret = ext4_map_blocks(NULL, inode, &map, 0); | |
5983 | if (ret < 0) | |
5984 | break; | |
5985 | if (ret > 0) | |
5986 | numblks += ret; | |
5987 | cur = cur + map.m_len; | |
5988 | } | |
5989 | ||
5990 | /* | |
5991 | * Count the number of extent tree blocks. We do it by looking up | |
5992 | * two successive extents and determining the difference between | |
5993 | * their paths. When the paths differ for two successive extents, | |
5994 | * we compare the blocks in the path at each level and increment | |
5995 | * iblocks by the total number of differences found. | |
5996 | */ | |
5997 | cur = 0; | |
1fd95c05 TT |
5998 | ret = skip_hole(inode, &cur); |
5999 | if (ret < 0) | |
6000 | goto out; | |
8016e29f HS |
6001 | path = ext4_find_extent(inode, cur, NULL, 0); |
6002 | if (IS_ERR(path)) | |
6003 | goto out; | |
6004 | numblks += path->p_depth; | |
6005 | ext4_ext_drop_refs(path); | |
6006 | kfree(path); | |
6007 | while (cur < end) { | |
6008 | path = ext4_find_extent(inode, cur, NULL, 0); | |
6009 | if (IS_ERR(path)) | |
6010 | break; | |
6011 | ex = path[path->p_depth].p_ext; | |
6012 | if (!ex) { | |
6013 | ext4_ext_drop_refs(path); | |
6014 | kfree(path); | |
6015 | return 0; | |
6016 | } | |
6017 | cur = max(cur + 1, le32_to_cpu(ex->ee_block) + | |
6018 | ext4_ext_get_actual_len(ex)); | |
1fd95c05 TT |
6019 | ret = skip_hole(inode, &cur); |
6020 | if (ret < 0) { | |
6021 | ext4_ext_drop_refs(path); | |
6022 | kfree(path); | |
6023 | break; | |
6024 | } | |
8016e29f HS |
6025 | path2 = ext4_find_extent(inode, cur, NULL, 0); |
6026 | if (IS_ERR(path2)) { | |
6027 | ext4_ext_drop_refs(path); | |
6028 | kfree(path); | |
6029 | break; | |
6030 | } | |
8016e29f HS |
6031 | for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) { |
6032 | cmp1 = cmp2 = 0; | |
6033 | if (i <= path->p_depth) | |
6034 | cmp1 = path[i].p_bh ? | |
6035 | path[i].p_bh->b_blocknr : 0; | |
6036 | if (i <= path2->p_depth) | |
6037 | cmp2 = path2[i].p_bh ? | |
6038 | path2[i].p_bh->b_blocknr : 0; | |
6039 | if (cmp1 != cmp2 && cmp2 != 0) | |
6040 | numblks++; | |
6041 | } | |
6042 | ext4_ext_drop_refs(path); | |
6043 | ext4_ext_drop_refs(path2); | |
6044 | kfree(path); | |
6045 | kfree(path2); | |
6046 | } | |
6047 | ||
6048 | out: | |
6049 | inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9); | |
6050 | ext4_mark_inode_dirty(NULL, inode); | |
6051 | return 0; | |
6052 | } | |
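
/*
 * Editor's sketch, not part of the kernel source: i_blocks counts 512-byte
 * units, so the final store above shifts the filesystem-block total by
 * (s_blocksize_bits - 9). The same conversion worked through; the 4096-byte
 * block size and the block count are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;	/* 4096-byte blocks */
	uint64_t numblks = 25;			/* data + extent tree blocks */
	uint64_t i_blocks;

	/* 25 blocks * (4096 / 512) sectors per block = 200 */
	i_blocks = numblks << (blocksize_bits - 9);
	printf("i_blocks = %llu\n", (unsigned long long)i_blocks);
	return 0;
}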
6053 | ||
6054 | int ext4_ext_clear_bb(struct inode *inode) | |
6055 | { | |
6056 | struct ext4_ext_path *path = NULL; | |
6057 | struct ext4_extent *ex; | |
6058 | ext4_lblk_t cur = 0, end; | |
6059 | int j, ret = 0; | |
6060 | struct ext4_map_blocks map; | |
6061 | ||
1ebf2178 HS |
6062 | if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA)) |
6063 | return 0; | |
6064 | ||
8016e29f HS |
6065 | /* Determine the size of the file first */ | |
6066 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, | |
6067 | EXT4_EX_NOCACHE); | |
6068 | if (IS_ERR(path)) | |
6069 | return PTR_ERR(path); | |
6070 | ex = path[path->p_depth].p_ext; | |
6071 | if (!ex) { | |
6072 | ext4_ext_drop_refs(path); | |
6073 | kfree(path); | |
6074 | return 0; | |
6075 | } | |
6076 | end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); | |
6077 | ext4_ext_drop_refs(path); | |
6078 | kfree(path); | |
6079 | ||
6080 | cur = 0; | |
6081 | while (cur < end) { | |
6082 | map.m_lblk = cur; | |
6083 | map.m_len = end - cur; | |
6084 | ret = ext4_map_blocks(NULL, inode, &map, 0); | |
6085 | if (ret < 0) | |
6086 | break; | |
6087 | if (ret > 0) { | |
6088 | path = ext4_find_extent(inode, map.m_lblk, NULL, 0); | |
6089 | if (!IS_ERR_OR_NULL(path)) { | |
6090 | for (j = 0; j < path->p_depth; j++) { | |
6092 | ext4_mb_mark_bb(inode->i_sb, | |
6093 | path[j].p_block, 1, 0); | |
6094 | } | |
6095 | ext4_ext_drop_refs(path); | |
6096 | kfree(path); | |
6097 | } | |
6098 | ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0); | |
6099 | } | |
6100 | cur = cur + map.m_len; | |
6101 | } | |
6102 | ||
6103 | return 0; | |
6104 | } |