// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include <linux/iomap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1   0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2   0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */

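/*
 * Checksumming of extent tree blocks: with the metadata_csum feature,
 * each extent tree block ends in an ext4_extent_tail whose et_checksum
 * covers the block's header and entries.  The helpers below compute,
 * verify and update that checksum.
 */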
static __le32 ext4_extent_block_csum(struct inode *inode,
                                     struct ext4_extent_header *eh)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u32 csum;

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
                           EXT4_EXTENT_TAIL_OFFSET(eh));
        return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
                                         struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!ext4_has_metadata_csum(inode->i_sb))
                return 1;

        et = find_ext4_extent_tail(eh);
        if (et->et_checksum != ext4_extent_block_csum(inode, eh))
                return 0;
        return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
                                       struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!ext4_has_metadata_csum(inode->i_sb))
                return;

        et = find_ext4_extent_tail(eh);
        et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path **ppath,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
        /*
         * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
         * moment, get_block can be called only for blocks inside i_size since
         * page cache has been already dropped and writes are blocked by
         * i_mutex. So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        ext4_discard_preallocations(inode, 0);
        up_write(&EXT4_I(inode)->i_data_sem);
        *dropped = 1;
        return 0;
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
                                int check_cred, int restart_cred,
                                int revoke_cred)
{
        int ret;
        int dropped = 0;

        ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
                revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
        if (dropped)
                down_write(&EXT4_I(inode)->i_data_sem);
        return ret;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                BUFFER_TRACE(path->p_bh, "get_write_access");
                return ext4_journal_get_write_access(handle, inode->i_sb,
                                                     path->p_bh, EXT4_JTR_NONE);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
                            handle_t *handle, struct inode *inode,
                            struct ext4_ext_path *path)
{
        int err;

        WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
        if (path->p_bh) {
                ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

#define ext4_ext_dirty(handle, inode, path) \
                __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

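/*
 * ext4_ext_find_goal:
 * return a preferred physical block to allocate near, derived from the
 * extent (or index block) found in @path for logical block @block, and
 * falling back to the inode's goal block group.
 */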
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * an ELF object sections out-of-order but in a way
                 * that eventually results in a contiguous object or
                 * executable file, or some database extending a table
                 * space file.  However, this is actually somewhat
                 * non-ideal if we are writing a sparse file such as
                 * qemu or KVM writing a raw image file that is going
                 * to stay fairly sparse, since it will end up
                 * fragmenting the file system's free space.  Maybe we
                 * should have some heuristics or some way to allow
                 * userspace to pass a hint to file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like index is empty;
                 * try to find starting block from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

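/*
 * ext4_ext_space_*:
 * number of extent (or index) entries that fit in an extent tree block
 * or in the inode body (i_data).  With AGGRESSIVE_TEST the limits are
 * artificially capped so that tree growth is exercised in testing.
 */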
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
                           struct ext4_ext_path **ppath, ext4_lblk_t lblk,
                           int nofail)
{
        struct ext4_ext_path *path = *ppath;
        int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
        int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

        if (nofail)
                flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

        return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
                        EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
                        flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);
        ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

        /*
         * We allow neither:
         *  - zero length
         *  - overflow/wrap-around
         */
        if (lblock + len <= lblock)
                return 0;
        return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
                                 struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                     struct ext4_extent_header *eh,
                                     ext4_fsblk_t *pblk, int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                ext4_lblk_t lblock = 0;
                ext4_lblk_t prev = 0;
                int len = 0;
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;

                        /* Check for overlapping extents */
                        lblock = le32_to_cpu(ext->ee_block);
                        len = ext4_ext_get_actual_len(ext);
                        if ((lblock <= prev) && prev) {
                                *pblk = ext4_ext_pblock(ext);
                                return 0;
                        }
                        ext++;
                        entries--;
                        prev = lblock + len - 1;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth, ext4_fsblk_t pblk)
{
        const char *error_msg;
        int max = 0, err = -EFSCORRUPTED;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        if (unlikely(depth > 32)) {
                error_msg = "too large eh_depth";
                goto corrupted;
        }
        /* Verify checksum on non-root extent tree nodes */
        if (ext_depth(inode) != depth &&
            !ext4_extent_block_csum_verify(inode, eh)) {
                error_msg = "extent tree corrupted";
                err = -EFSBADCRC;
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode_err(inode, function, line, 0, -err,
                             "pblk %llu bad header/extent: %s - magic %x, "
                             "entries %u, max %u(%u), depth %u(%u)",
                             (unsigned long long) pblk, error_msg,
                             le16_to_cpu(eh->eh_magic),
                             le16_to_cpu(eh->eh_entries),
                             le16_to_cpu(eh->eh_max),
                             max, le16_to_cpu(eh->eh_depth), depth);
        return err;
}

#define ext4_ext_check(inode, eh, depth, pblk) \
        __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

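/*
 * ext4_cache_extents:
 * populate the extent status tree from a leaf block, caching each extent
 * as written or unwritten and the gaps between extents as holes.
 */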
static void ext4_cache_extents(struct inode *inode,
                               struct ext4_extent_header *eh)
{
        struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
        ext4_lblk_t prev = 0;
        int i;

        for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
                unsigned int status = EXTENT_STATUS_WRITTEN;
                ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
                int len = ext4_ext_get_actual_len(ex);

                if (prev && (prev != lblk))
                        ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
                                             EXTENT_STATUS_HOLE);

                if (ext4_ext_is_unwritten(ex))
                        status = EXTENT_STATUS_UNWRITTEN;
                ext4_es_cache_extent(inode, lblk, len,
                                     ext4_ext_pblock(ex), status);
                prev = lblk + len;
        }
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
                         struct inode *inode, ext4_fsblk_t pblk, int depth,
                         int flags)
{
        struct buffer_head *bh;
        int err;
        gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;

        if (flags & EXT4_EX_NOFAIL)
                gfp_flags |= __GFP_NOFAIL;

        bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);

        if (!bh_uptodate_or_lock(bh)) {
                trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
                err = ext4_read_bh(bh, 0, NULL);
                if (err < 0)
                        goto errout;
        }
        if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
                return bh;
        err = __ext4_ext_check(function, line, inode,
                               ext_block_hdr(bh), depth, pblk);
        if (err)
                goto errout;
        set_buffer_verified(bh);
        /*
         * If this is a leaf block, cache all of its entries
         */
        if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
                struct ext4_extent_header *eh = ext_block_hdr(bh);
                ext4_cache_extents(inode, eh);
        }
        return bh;
errout:
        put_bh(bh);
        return ERR_PTR(err);

}

#define read_extent_tree_block(inode, pblk, depth, flags) \
        __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
                                 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_ext_path *path = NULL;
        struct buffer_head *bh;
        int i = 0, depth, ret = 0;

        if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                return 0;       /* not an extent-mapped inode */

        down_read(&ei->i_data_sem);
        depth = ext_depth(inode);

        /* Don't cache anything if there are no external extent blocks */
        if (!depth) {
                up_read(&ei->i_data_sem);
                return ret;
        }

        path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
                       GFP_NOFS);
        if (path == NULL) {
                up_read(&ei->i_data_sem);
                return -ENOMEM;
        }

        path[0].p_hdr = ext_inode_hdr(inode);
        ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
        if (ret)
                goto out;
        path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
        while (i >= 0) {
                /*
                 * If this is a leaf block or we've reached the end of
                 * the index block, go up
                 */
                if ((i == depth) ||
                    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        i--;
                        continue;
                }
                bh = read_extent_tree_block(inode,
                                            ext4_idx_pblock(path[i].p_idx++),
                                            depth - i - 1,
                                            EXT4_EX_FORCE_CACHE);
                if (IS_ERR(bh)) {
                        ret = PTR_ERR(bh);
                        break;
                }
                i++;
                path[i].p_bh = bh;
                path[i].p_hdr = ext_block_hdr(bh);
                path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
        }
        ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
        up_read(&ei->i_data_sem);
        ext4_ext_drop_refs(path);
        kfree(path);
        return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug(inode, "path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug(inode, "  %d->%llu",
                                  le32_to_cpu(path->p_idx->ei_block),
                                  ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug(inode, "  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_unwritten(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug(inode, "  []");
        }
        ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug(inode, "Displaying leaf extents\n");

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_unwritten(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
                                  level, le32_to_cpu(idx->ei_block),
                                  ext4_idx_pblock(idx), newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_unwritten(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth, i;

        if (!path)
                return;
        depth = path->p_depth;
        for (i = 0; i <= depth; i++, path++) {
                brelse(path->p_bh);
                path->p_bh = NULL;
        }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;


        ext_debug(inode, "binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
                          le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
                          r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 && le32_to_cpu(ix->ei_block) <=
                            le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug(inode, "binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
                          le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
                          r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug(inode, "  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_unwritten(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif

}

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        eh->eh_generation = 0;
        ext4_mark_inode_dirty(handle, inode);
}

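/*
 * ext4_find_extent:
 * walk the extent tree from the root down to the leaf that should contain
 * @block and return the array of ext4_ext_path levels visited; an existing
 * *orig_path is reused, or reallocated if the tree depth has grown.
 */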
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
                 struct ext4_ext_path **orig_path, int flags)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
        short int depth, i, ppos = 0;
        int ret;
        gfp_t gfp_flags = GFP_NOFS;

        if (flags & EXT4_EX_NOFAIL)
                gfp_flags |= __GFP_NOFAIL;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
        if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
                EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
                                 depth);
                ret = -EFSCORRUPTED;
                goto err;
        }

        if (path) {
                ext4_ext_drop_refs(path);
                if (depth > path[0].p_maxdepth) {
                        kfree(path);
                        *orig_path = path = NULL;
                }
        }
        if (!path) {
                /* account possible depth increase */
                path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
                                gfp_flags);
                if (unlikely(!path))
                        return ERR_PTR(-ENOMEM);
                path[0].p_maxdepth = depth + 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
                ext4_cache_extents(inode, eh);
        /* walk through the tree */
        while (i) {
                ext_debug(inode, "depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
                                            flags);
                if (IS_ERR(bh)) {
                        ret = PTR_ERR(bh);
                        goto err;
                }

                eh = ext_block_hdr(bh);
                ppos++;
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        kfree(path);
        if (orig_path)
                *orig_path = NULL;
        return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EFSCORRUPTED;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EFSCORRUPTED;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug(inode, "insert new index %d after: %llu\n",
                          logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug(inode, "insert new index %d before: %llu\n",
                          logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug(inode, "insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EFSCORRUPTED;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EFSCORRUPTED;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        gfp_t gfp_flags = GFP_NOFS;
        int err = 0;
        size_t ext_size = 0;

        if (flags & EXT4_EX_NOFAIL)
                gfp_flags |= __GFP_NOFAIL;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EFSCORRUPTED;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug(inode, "leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug(inode, "leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If error occurs, then we break processing
         * and mark filesystem read-only. index won't
         * be inserted and tree will be in consistent
         * state. Next mount will repair buffers too.
         */

        /*
         * Get array to track all allocated blocks.
         * We need this to handle errors and free blocks
         * upon them.
         */
        ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EFSCORRUPTED;
                goto cleanup;
        }
        bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh)) {
                err = -ENOMEM;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
                                             EXT4_JTR_NONE);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;
        neh->eh_generation = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EFSCORRUPTED;
                goto cleanup;
        }
        /* start copy from next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        /* zero out unused area in the extent block */
        ext_size = sizeof(struct ext4_extent_header) +
                sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
        memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;

        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EFSCORRUPTED;
                goto cleanup;
        }
        if (k)
                ext_debug(inode, "create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (unlikely(!bh)) {
                        err = -ENOMEM;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
                                                     EXT4_JTR_NONE);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                neh->eh_generation = 0;
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EFSCORRUPTED;
                        goto cleanup;
                }
                /* start copy indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                /* zero out unused area in the extent block */
                ext_size = sizeof(struct ext4_extent_header) +
                   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
                memset(bh->b_data + ext_size, 0,
                        inode->i_sb->s_blocksize - ext_size);
                ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock, goal = 0;
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
        int err = 0;
        size_t ext_size = 0;

        /* Try to prepend new index to old one */
        if (ext_depth(inode))
                goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
        if (goal > le32_to_cpu(es->s_first_data_block)) {
                flags |= EXT4_MB_HINT_TRY_GOAL;
                goal--;
        } else
                goal = ext4_inode_to_goal_block(inode);
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, &err);
        if (newblock == 0)
                return err;

        bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return -ENOMEM;
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
                                             EXT4_JTR_NONE);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        ext_size = sizeof(EXT4_I(inode)->i_data);
        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
        /* zero out unused area in the extent block */
        memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate e_max right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        set_buffer_verified(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        le16_add_cpu(&neh->eh_depth, 1);
        err = ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                    unsigned int mb_flags,
                                    unsigned int gb_flags,
                                    struct ext4_ext_path **ppath,
                                    struct ext4_extent *newext)
{
        struct ext4_ext_path *path = *ppath;
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up to the tree and look for free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use already allocated block for index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                path = ext4_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    ppath, gb_flags);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, mb_flags);
                if (err)
                        goto out;

                /* refill path */
                path = ext4_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    ppath, gb_flags);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only first (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

1988b51e AT |
1416 | /* |
1417 | * search the closest allocated block to the left for *logical | |
1418 | * and returns it at @logical + it's physical address at @phys | |
1419 | * if *logical is the smallest allocated block, the function | |
1420 | * returns 0 at @phys | |
1421 | * return value contains 0 (success) or error code | |
1422 | */ | |
1f109d5a TT |
1423 | static int ext4_ext_search_left(struct inode *inode, |
1424 | struct ext4_ext_path *path, | |
1425 | ext4_lblk_t *logical, ext4_fsblk_t *phys) | |
1988b51e AT |
1426 | { |
1427 | struct ext4_extent_idx *ix; | |
1428 | struct ext4_extent *ex; | |
b939e376 | 1429 | int depth, ee_len; |
1988b51e | 1430 | |
273df556 FM |
1431 | if (unlikely(path == NULL)) { |
1432 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | |
6a797d27 | 1433 | return -EFSCORRUPTED; |
273df556 | 1434 | } |
1988b51e AT |
1435 | depth = path->p_depth; |
1436 | *phys = 0; | |
1437 | ||
1438 | if (depth == 0 && path->p_ext == NULL) | |
1439 | return 0; | |
1440 | ||
1441 | /* usually extent in the path covers blocks smaller | |
1442 | * then *logical, but it can be that extent is the | |
1443 | * first one in the file */ | |
1444 | ||
1445 | ex = path[depth].p_ext; | |
b939e376 | 1446 | ee_len = ext4_ext_get_actual_len(ex); |
1988b51e | 1447 | if (*logical < le32_to_cpu(ex->ee_block)) { |
273df556 FM |
1448 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
1449 | EXT4_ERROR_INODE(inode, | |
1450 | "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", | |
1451 | *logical, le32_to_cpu(ex->ee_block)); | |
6a797d27 | 1452 | return -EFSCORRUPTED; |
273df556 | 1453 | } |
1988b51e AT |
1454 | while (--depth >= 0) { |
1455 | ix = path[depth].p_idx; | |
273df556 FM |
1456 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
1457 | EXT4_ERROR_INODE(inode, | |
1458 | "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", | |
6ee3b212 | 1459 | ix != NULL ? le32_to_cpu(ix->ei_block) : 0, |
273df556 | 1460 | EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? |
6ee3b212 | 1461 | le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, |
273df556 | 1462 | depth); |
6a797d27 | 1463 | return -EFSCORRUPTED; |
273df556 | 1464 | } |
1988b51e AT |
1465 | } |
1466 | return 0; | |
1467 | } | |
1468 | ||
273df556 FM |
1469 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
1470 | EXT4_ERROR_INODE(inode, | |
1471 | "logical %d < ee_block %d + ee_len %d!", | |
1472 | *logical, le32_to_cpu(ex->ee_block), ee_len); | |
6a797d27 | 1473 | return -EFSCORRUPTED; |
273df556 | 1474 | } |
1988b51e | 1475 | |
b939e376 | 1476 | *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; |
bf89d16f | 1477 | *phys = ext4_ext_pblock(ex) + ee_len - 1; |
1988b51e AT |
1478 | return 0; |
1479 | } | |
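
The contract above is easier to see outside the tree: with extents kept sorted by logical block, the closest allocated block to the left of a lookup target is simply the last block of the nearest extent starting at or before it. A minimal userspace sketch of that idea (toy types and names, not the kernel's; the real function walks the path instead of scanning an array):

#include <stdint.h>
#include <stdio.h>

struct toy_extent { uint32_t lblk; uint64_t pblk; uint16_t len; };

/* returns 1 and fills *out_l/*out_p, or 0 if lblk precedes all extents */
static int toy_search_left(const struct toy_extent *ex, int nr,
			   uint32_t lblk, uint32_t *out_l, uint64_t *out_p)
{
	for (int i = nr - 1; i >= 0; i--) {
		if (ex[i].lblk <= lblk) {
			/* the left neighbour is this extent's last block */
			*out_l = ex[i].lblk + ex[i].len - 1;
			*out_p = ex[i].pblk + ex[i].len - 1;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	struct toy_extent map[2] = { { 0, 500, 4 }, { 100, 900, 8 } };
	uint32_t l;
	uint64_t p;

	if (toy_search_left(map, 2, 50, &l, &p))   /* 50 falls in a gap */
		printf("left neighbour: lblk %u pblk %llu\n",
		       (unsigned)l, (unsigned long long)p); /* 3 and 503 */
	return 0;
}
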
1480 | ||
1481 | /* | |
d7dce9e0 | 1482 | * Search the closest allocated block to the right of *logical
1483 | * and return it at @logical, with its physical address at @phys.
1484 | * If no such block exists, return 0 and set @phys to 0. Return 1
1485 | * if an allocated block was found, in which case ret_ex is valid.
1486 | * Otherwise return a negative error code.
1988b51e | 1487 | */ |
1f109d5a TT |
1488 | static int ext4_ext_search_right(struct inode *inode, |
1489 | struct ext4_ext_path *path, | |
4d33b1ef | 1490 | ext4_lblk_t *logical, ext4_fsblk_t *phys, |
d7dce9e0 | 1491 | struct ext4_extent *ret_ex) |
1988b51e AT |
1492 | { |
1493 | struct buffer_head *bh = NULL; | |
1494 | struct ext4_extent_header *eh; | |
1495 | struct ext4_extent_idx *ix; | |
1496 | struct ext4_extent *ex; | |
1497 | ext4_fsblk_t block; | |
395a87bf ES |
1498 | int depth; /* Note, NOT eh_depth; depth from top of tree */ |
1499 | int ee_len; | |
1988b51e | 1500 | |
273df556 FM |
1501 | if (unlikely(path == NULL)) { |
1502 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | |
6a797d27 | 1503 | return -EFSCORRUPTED; |
273df556 | 1504 | } |
1988b51e AT |
1505 | depth = path->p_depth; |
1506 | *phys = 0; | |
1507 | ||
1508 | if (depth == 0 && path->p_ext == NULL) | |
1509 | return 0; | |
1510 | ||
1511 | /* usually the extent in the path covers blocks smaller
1512 | * than *logical, but it can happen that the extent is the
1513 | * first one in the file */
1514 | ||
1515 | ex = path[depth].p_ext; | |
b939e376 | 1516 | ee_len = ext4_ext_get_actual_len(ex); |
1988b51e | 1517 | if (*logical < le32_to_cpu(ex->ee_block)) { |
273df556 FM |
1518 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
1519 | EXT4_ERROR_INODE(inode, | |
1520 | "first_extent(path[%d].p_hdr) != ex", | |
1521 | depth); | |
6a797d27 | 1522 | return -EFSCORRUPTED; |
273df556 | 1523 | } |
1988b51e AT |
1524 | while (--depth >= 0) { |
1525 | ix = path[depth].p_idx; | |
273df556 FM |
1526 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
1527 | EXT4_ERROR_INODE(inode, | |
1528 | "ix != EXT_FIRST_INDEX *logical %d!", | |
1529 | *logical); | |
6a797d27 | 1530 | return -EFSCORRUPTED; |
273df556 | 1531 | } |
1988b51e | 1532 | } |
4d33b1ef | 1533 | goto found_extent; |
1988b51e AT |
1534 | } |
1535 | ||
273df556 FM |
1536 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
1537 | EXT4_ERROR_INODE(inode, | |
1538 | "logical %d < ee_block %d + ee_len %d!", | |
1539 | *logical, le32_to_cpu(ex->ee_block), ee_len); | |
6a797d27 | 1540 | return -EFSCORRUPTED; |
273df556 | 1541 | } |
1988b51e AT |
1542 | |
1543 | if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { | |
1544 | /* next allocated block in this leaf */ | |
1545 | ex++; | |
4d33b1ef | 1546 | goto found_extent; |
1988b51e AT |
1547 | } |
1548 | ||
1549 | /* go up and search for index to the right */ | |
1550 | while (--depth >= 0) { | |
1551 | ix = path[depth].p_idx; | |
1552 | if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) | |
25f1ee3a | 1553 | goto got_index; |
1988b51e AT |
1554 | } |
1555 | ||
25f1ee3a WF |
1556 | /* we've gone up to the root and found no index to the right */ |
1557 | return 0; | |
1988b51e | 1558 | |
25f1ee3a | 1559 | got_index: |
1988b51e AT |
1560 | /* we've found an index to the right, let's
1561 | * follow it and find the closest allocated
1562 | * block to the right */
1563 | ix++; | |
bf89d16f | 1564 | block = ext4_idx_pblock(ix); |
1988b51e | 1565 | while (++depth < path->p_depth) { |
395a87bf | 1566 | /* subtract from p_depth to get proper eh_depth */ |
7d7ea89e | 1567 | bh = read_extent_tree_block(inode, block, |
107a7bd3 | 1568 | path->p_depth - depth, 0); |
7d7ea89e TT |
1569 | if (IS_ERR(bh)) |
1570 | return PTR_ERR(bh); | |
1571 | eh = ext_block_hdr(bh); | |
1988b51e | 1572 | ix = EXT_FIRST_INDEX(eh); |
bf89d16f | 1573 | block = ext4_idx_pblock(ix); |
1988b51e AT |
1574 | put_bh(bh); |
1575 | } | |
1576 | ||
107a7bd3 | 1577 | bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); |
7d7ea89e TT |
1578 | if (IS_ERR(bh)) |
1579 | return PTR_ERR(bh); | |
1988b51e | 1580 | eh = ext_block_hdr(bh); |
1988b51e | 1581 | ex = EXT_FIRST_EXTENT(eh); |
4d33b1ef | 1582 | found_extent: |
1988b51e | 1583 | *logical = le32_to_cpu(ex->ee_block); |
bf89d16f | 1584 | *phys = ext4_ext_pblock(ex); |
d7dce9e0 | 1585 | if (ret_ex) |
1586 | *ret_ex = *ex; | |
4d33b1ef TT |
1587 | if (bh) |
1588 | put_bh(bh); | |
d7dce9e0 | 1589 | return 1; |
1988b51e AT |
1590 | } |
1591 | ||
a86c6181 | 1592 | /* |
d0d856e8 | 1593 | * ext4_ext_next_allocated_block: |
f17722f9 | 1594 | * returns the first allocated block in the subsequent extent, or EXT_MAX_BLOCKS.
d0d856e8 RD |
1595 | * NOTE: it considers the block number from an index entry as
1596 | * an allocated block. Thus, index entries have to be consistent
1597 | * with the leaves.
a86c6181 | 1598 | */ |
fcf6b1b7 | 1599 | ext4_lblk_t |
a86c6181 AT |
1600 | ext4_ext_next_allocated_block(struct ext4_ext_path *path) |
1601 | { | |
1602 | int depth; | |
1603 | ||
1604 | BUG_ON(path == NULL); | |
1605 | depth = path->p_depth; | |
1606 | ||
1607 | if (depth == 0 && path->p_ext == NULL) | |
f17722f9 | 1608 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1609 | |
1610 | while (depth >= 0) { | |
6e89bbb7 EB |
1611 | struct ext4_ext_path *p = &path[depth]; |
1612 | ||
a86c6181 AT |
1613 | if (depth == path->p_depth) { |
1614 | /* leaf */ | |
6e89bbb7 EB |
1615 | if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr)) |
1616 | return le32_to_cpu(p->p_ext[1].ee_block); | |
a86c6181 AT |
1617 | } else { |
1618 | /* index */ | |
6e89bbb7 EB |
1619 | if (p->p_idx != EXT_LAST_INDEX(p->p_hdr)) |
1620 | return le32_to_cpu(p->p_idx[1].ei_block); | |
a86c6181 AT |
1621 | } |
1622 | depth--; | |
1623 | } | |
1624 | ||
f17722f9 | 1625 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1626 | } |
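
The walk above generalizes to any B+-tree-like path: starting at the leaf and moving toward the root, the first level whose current entry still has a right-hand sibling supplies the next allocated block. A standalone sketch of that control flow, assuming a toy path of (position, entry count, first-block table) triples rather than the kernel's structures:

#include <stdio.h>

#define TOY_MAX_BLOCKS 0xffffffffU

struct toy_level {
	int pos;                     /* index of the current entry */
	int nr;                      /* entries in this level's node */
	const unsigned *first_block; /* first_block[i]: start of entry i */
};

static unsigned toy_next_allocated(const struct toy_level *path, int depth)
{
	/* leaf first, then climb toward the root */
	for (int d = depth; d >= 0; d--)
		if (path[d].pos + 1 < path[d].nr)
			return path[d].first_block[path[d].pos + 1];
	return TOY_MAX_BLOCKS;               /* end of tree */
}

int main(void)
{
	unsigned leaf_blocks[2] = { 0, 8 };   /* two extents in the leaf */
	unsigned idx_blocks[2]  = { 0, 100 }; /* two index entries in root */
	struct toy_level path[2] = {
		{ 0, 2, idx_blocks },   /* root: still on first child */
		{ 1, 2, leaf_blocks },  /* leaf: on its last extent */
	};

	/* leaf exhausted, so the root's second index answers: 100 */
	printf("next allocated: %u\n", toy_next_allocated(path, 1));
	return 0;
}
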
1627 | ||
1628 | /* | |
d0d856e8 | 1629 | * ext4_ext_next_leaf_block: |
f17722f9 | 1630 | * returns the first allocated block from the next leaf, or EXT_MAX_BLOCKS
a86c6181 | 1631 | */ |
5718789d | 1632 | static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) |
a86c6181 AT |
1633 | { |
1634 | int depth; | |
1635 | ||
1636 | BUG_ON(path == NULL); | |
1637 | depth = path->p_depth; | |
1638 | ||
1639 | /* a tree of depth zero has no leaf blocks at all */
1640 | if (depth == 0) | |
f17722f9 | 1641 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1642 | |
1643 | /* go to index block */ | |
1644 | depth--; | |
1645 | ||
1646 | while (depth >= 0) { | |
1647 | if (path[depth].p_idx != | |
1648 | EXT_LAST_INDEX(path[depth].p_hdr)) | |
725d26d3 AK |
1649 | return (ext4_lblk_t) |
1650 | le32_to_cpu(path[depth].p_idx[1].ei_block); | |
a86c6181 AT |
1651 | depth--; |
1652 | } | |
1653 | ||
f17722f9 | 1654 | return EXT_MAX_BLOCKS; |
a86c6181 AT |
1655 | } |
1656 | ||
1657 | /* | |
d0d856e8 RD |
1658 | * ext4_ext_correct_indexes: |
1659 | * if a leaf gets modified and the modified extent is the first in the leaf,
1660 | * then we have to correct all indexes above.
a86c6181 AT |
1661 | * TODO: do we need to correct tree in all cases? |
1662 | */ | |
1d03ec98 | 1663 | static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, |
a86c6181 AT |
1664 | struct ext4_ext_path *path) |
1665 | { | |
1666 | struct ext4_extent_header *eh; | |
1667 | int depth = ext_depth(inode); | |
1668 | struct ext4_extent *ex; | |
1669 | __le32 border; | |
1670 | int k, err = 0; | |
1671 | ||
1672 | eh = path[depth].p_hdr; | |
1673 | ex = path[depth].p_ext; | |
273df556 FM |
1674 | |
1675 | if (unlikely(ex == NULL || eh == NULL)) { | |
1676 | EXT4_ERROR_INODE(inode, | |
1677 | "ex %p == NULL or eh %p == NULL", ex, eh); | |
6a797d27 | 1678 | return -EFSCORRUPTED; |
273df556 | 1679 | } |
a86c6181 AT |
1680 | |
1681 | if (depth == 0) { | |
1682 | /* there is no tree at all */ | |
1683 | return 0; | |
1684 | } | |
1685 | ||
1686 | if (ex != EXT_FIRST_EXTENT(eh)) { | |
1687 | /* we correct tree if first leaf got modified only */ | |
1688 | return 0; | |
1689 | } | |
1690 | ||
1691 | /* | |
d0d856e8 | 1692 | * TODO: we need correction if border is smaller than current one |
a86c6181 AT |
1693 | */ |
1694 | k = depth - 1; | |
1695 | border = path[depth].p_ext->ee_block; | |
7e028976 AM |
1696 | err = ext4_ext_get_access(handle, inode, path + k); |
1697 | if (err) | |
a86c6181 AT |
1698 | return err; |
1699 | path[k].p_idx->ei_block = border; | |
7e028976 AM |
1700 | err = ext4_ext_dirty(handle, inode, path + k); |
1701 | if (err) | |
a86c6181 AT |
1702 | return err; |
1703 | ||
1704 | while (k--) { | |
1705 | /* change all left-side indexes */ | |
1706 | if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) | |
1707 | break; | |
7e028976 AM |
1708 | err = ext4_ext_get_access(handle, inode, path + k); |
1709 | if (err) | |
a86c6181 AT |
1710 | break; |
1711 | path[k].p_idx->ei_block = border; | |
7e028976 AM |
1712 | err = ext4_ext_dirty(handle, inode, path + k); |
1713 | if (err) | |
a86c6181 AT |
1714 | break; |
1715 | } | |
1716 | ||
1717 | return err; | |
1718 | } | |
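
What ext4_ext_correct_indexes() performs is the classic B+-tree separator update: when the first key of a leaf changes, each ancestor is rewritten with the new key for as long as the child the path descends through is the leftmost entry of its node. A toy sketch of just that propagation (illustrative structures, no journaling or error handling):

#include <stdio.h>

struct toy_node {
	unsigned keys[4]; /* keys[i]: first block covered by child i */
	int nr;           /* entries in use */
	int pos;          /* which child the path descends through */
};

static void toy_correct_indexes(struct toy_node *path, int depth,
				unsigned border)
{
	/* path[depth] is the leaf whose first entry changed */
	for (int k = depth - 1; k >= 0; k--) {
		path[k].keys[path[k].pos] = border;
		if (path[k].pos != 0) /* not leftmost: ancestors unaffected */
			break;
	}
}

int main(void)
{
	struct toy_node path[3] = {
		{ { 10, 500 }, 2, 0 }, /* root */
		{ { 10, 200 }, 2, 0 }, /* interior index node */
		{ { 10, 40 },  2, 0 }, /* leaf */
	};

	toy_correct_indexes(path, 2, 12); /* leaf now starts at block 12 */
	printf("root key %u, index key %u\n",
	       path[0].keys[0], path[1].keys[0]); /* both become 12 */
	return 0;
}
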
1719 | ||
43f81677 EB |
1720 | static int ext4_can_extents_be_merged(struct inode *inode, |
1721 | struct ext4_extent *ex1, | |
1722 | struct ext4_extent *ex2) | |
a86c6181 | 1723 | { |
da0169b3 | 1724 | unsigned short ext1_ee_len, ext2_ee_len; |
a2df2a63 | 1725 | |
556615dc | 1726 | if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) |
a2df2a63 AA |
1727 | return 0; |
1728 | ||
1729 | ext1_ee_len = ext4_ext_get_actual_len(ex1); | |
1730 | ext2_ee_len = ext4_ext_get_actual_len(ex2); | |
1731 | ||
1732 | if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != | |
63f57933 | 1733 | le32_to_cpu(ex2->ee_block)) |
a86c6181 AT |
1734 | return 0; |
1735 | ||
da0169b3 | 1736 | if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) |
471d4011 | 1737 | return 0; |
378f32ba | 1738 | |
556615dc | 1739 | if (ext4_ext_is_unwritten(ex1) && |
378f32ba | 1740 | ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) |
a9b82415 | 1741 | return 0; |
bbf2f9fb | 1742 | #ifdef AGGRESSIVE_TEST |
b939e376 | 1743 | if (ext1_ee_len >= 4) |
a86c6181 AT |
1744 | return 0; |
1745 | #endif | |
1746 | ||
bf89d16f | 1747 | if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) |
a86c6181 AT |
1748 | return 1; |
1749 | return 0; | |
1750 | } | |
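
The predicate boils down to four checks: identical written/unwritten state, logical contiguity, physical contiguity, and a combined length within the on-disk limit (32768 blocks for a written extent, 32767 for an unwritten one, since the top bit of the length field encodes the unwritten state). A standalone sketch of the same rule, with toy stand-ins for the kernel types:

#include <stdint.h>
#include <stdio.h>

#define TOY_INIT_MAX_LEN   32768u
#define TOY_UNWRIT_MAX_LEN (TOY_INIT_MAX_LEN - 1)

struct toy_extent {
	uint32_t lblk;  /* first logical block */
	uint64_t pblk;  /* first physical block */
	uint16_t len;   /* number of blocks */
	int unwritten;  /* unwritten (preallocated) flag */
};

static int toy_can_merge(const struct toy_extent *a,
			 const struct toy_extent *b)
{
	if (a->unwritten != b->unwritten)
		return 0;
	if (a->lblk + a->len != b->lblk)   /* logically contiguous? */
		return 0;
	if (a->pblk + a->len != b->pblk)   /* physically contiguous? */
		return 0;
	if ((unsigned)a->len + b->len >
	    (a->unwritten ? TOY_UNWRIT_MAX_LEN : TOY_INIT_MAX_LEN))
		return 0;                  /* result would be too long */
	return 1;
}

int main(void)
{
	struct toy_extent a = { 0, 1000, 8, 0 };
	struct toy_extent b = { 8, 1008, 8, 0 };

	printf("mergeable: %d\n", toy_can_merge(&a, &b)); /* prints 1 */
	return 0;
}
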
1751 | ||
56055d3a AA |
1752 | /* |
1753 | * This function tries to merge the "ex" extent to the next extent in the tree. | |
1754 | * It always tries to merge towards the right. If you want to merge
1755 | * towards the left, pass "ex - 1" as the argument instead of "ex".
1756 | * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns | |
1757 | * 1 if they got merged. | |
1758 | */ | |
197217a5 | 1759 | static int ext4_ext_try_to_merge_right(struct inode *inode, |
1f109d5a TT |
1760 | struct ext4_ext_path *path, |
1761 | struct ext4_extent *ex) | |
56055d3a AA |
1762 | { |
1763 | struct ext4_extent_header *eh; | |
1764 | unsigned int depth, len; | |
556615dc | 1765 | int merge_done = 0, unwritten; |
56055d3a AA |
1766 | |
1767 | depth = ext_depth(inode); | |
1768 | BUG_ON(path[depth].p_hdr == NULL); | |
1769 | eh = path[depth].p_hdr; | |
1770 | ||
1771 | while (ex < EXT_LAST_EXTENT(eh)) { | |
1772 | if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) | |
1773 | break; | |
1774 | /* merge with next extent! */ | |
556615dc | 1775 | unwritten = ext4_ext_is_unwritten(ex); |
56055d3a AA |
1776 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
1777 | + ext4_ext_get_actual_len(ex + 1)); | |
556615dc LC |
1778 | if (unwritten) |
1779 | ext4_ext_mark_unwritten(ex); | |
56055d3a AA |
1780 | |
1781 | if (ex + 1 < EXT_LAST_EXTENT(eh)) { | |
1782 | len = (EXT_LAST_EXTENT(eh) - ex - 1) | |
1783 | * sizeof(struct ext4_extent); | |
1784 | memmove(ex + 1, ex + 2, len); | |
1785 | } | |
e8546d06 | 1786 | le16_add_cpu(&eh->eh_entries, -1); |
56055d3a AA |
1787 | merge_done = 1; |
1788 | WARN_ON(eh->eh_entries == 0); | |
1789 | if (!eh->eh_entries) | |
24676da4 | 1790 | EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); |
56055d3a AA |
1791 | } |
1792 | ||
1793 | return merge_done; | |
1794 | } | |
1795 | ||
ecb94f5f TT |
1796 | /* |
1797 | * This function does a very simple check to see if we can collapse | |
1798 | * an extent tree with a single extent tree leaf block into the inode. | |
1799 | */ | |
1800 | static void ext4_ext_try_to_merge_up(handle_t *handle, | |
1801 | struct inode *inode, | |
1802 | struct ext4_ext_path *path) | |
1803 | { | |
1804 | size_t s; | |
1805 | unsigned max_root = ext4_ext_space_root(inode, 0); | |
1806 | ext4_fsblk_t blk; | |
1807 | ||
1808 | if ((path[0].p_depth != 1) || | |
1809 | (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || | |
1810 | (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) | |
1811 | return; | |
1812 | ||
1813 | /* | |
1814 | * We need to modify the block allocation bitmap and the block | |
1815 | * group descriptor to release the extent tree block. If we | |
1816 | * can't get the journal credits, give up. | |
1817 | */ | |
83448bdf JK |
1818 | if (ext4_journal_extend(handle, 2, |
1819 | ext4_free_metadata_revoke_credits(inode->i_sb, 1))) | |
ecb94f5f TT |
1820 | return; |
1821 | ||
1822 | /* | |
1823 | * Copy the extent data up to the inode | |
1824 | */ | |
1825 | blk = ext4_idx_pblock(path[0].p_idx); | |
1826 | s = le16_to_cpu(path[1].p_hdr->eh_entries) * | |
1827 | sizeof(struct ext4_extent_idx); | |
1828 | s += sizeof(struct ext4_extent_header); | |
1829 | ||
10809df8 | 1830 | path[1].p_maxdepth = path[0].p_maxdepth; |
ecb94f5f TT |
1831 | memcpy(path[0].p_hdr, path[1].p_hdr, s); |
1832 | path[0].p_depth = 0; | |
1833 | path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + | |
1834 | (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); | |
1835 | path[0].p_hdr->eh_max = cpu_to_le16(max_root); | |
1836 | ||
1837 | brelse(path[1].p_bh); | |
1838 | ext4_free_blocks(handle, inode, NULL, blk, 1, | |
71d4f7d0 | 1839 | EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
ecb94f5f TT |
1840 | } |
1841 | ||
197217a5 | 1842 | /* |
adde81cf EB |
1843 | * This function tries to merge the @ex extent to neighbours in the tree, then |
1844 | * tries to collapse the extent tree into the inode. | |
197217a5 | 1845 | */ |
ecb94f5f TT |
1846 | static void ext4_ext_try_to_merge(handle_t *handle, |
1847 | struct inode *inode, | |
197217a5 | 1848 | struct ext4_ext_path *path, |
adde81cf EB |
1849 | struct ext4_extent *ex) |
1850 | { | |
197217a5 YY |
1851 | struct ext4_extent_header *eh; |
1852 | unsigned int depth; | |
1853 | int merge_done = 0; | |
197217a5 YY |
1854 | |
1855 | depth = ext_depth(inode); | |
1856 | BUG_ON(path[depth].p_hdr == NULL); | |
1857 | eh = path[depth].p_hdr; | |
1858 | ||
1859 | if (ex > EXT_FIRST_EXTENT(eh)) | |
1860 | merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); | |
1861 | ||
1862 | if (!merge_done) | |
ecb94f5f | 1863 | (void) ext4_ext_try_to_merge_right(inode, path, ex); |
197217a5 | 1864 | |
ecb94f5f | 1865 | ext4_ext_try_to_merge_up(handle, inode, path); |
197217a5 YY |
1866 | } |
1867 | ||
25d14f98 AA |
1868 | /* |
1869 | * check if a portion of the "newext" extent overlaps with an | |
1870 | * existing extent. | |
1871 | * | |
1872 | * If there is an overlap discovered, it updates the length of the newext | |
1873 | * such that there will be no overlap, and then returns 1. | |
1874 | * If there is no overlap found, it returns 0. | |
1875 | */ | |
4d33b1ef TT |
1876 | static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, |
1877 | struct inode *inode, | |
1f109d5a TT |
1878 | struct ext4_extent *newext, |
1879 | struct ext4_ext_path *path) | |
25d14f98 | 1880 | { |
725d26d3 | 1881 | ext4_lblk_t b1, b2; |
25d14f98 AA |
1882 | unsigned int depth, len1; |
1883 | unsigned int ret = 0; | |
1884 | ||
1885 | b1 = le32_to_cpu(newext->ee_block); | |
a2df2a63 | 1886 | len1 = ext4_ext_get_actual_len(newext); |
25d14f98 AA |
1887 | depth = ext_depth(inode); |
1888 | if (!path[depth].p_ext) | |
1889 | goto out; | |
f5a44db5 | 1890 | b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); |
25d14f98 AA |
1891 | |
1892 | /* | |
1893 | * get the next allocated block if the extent in the path | |
2b2d6d01 | 1894 | * is before the requested block(s) |
25d14f98 AA |
1895 | */ |
1896 | if (b2 < b1) { | |
1897 | b2 = ext4_ext_next_allocated_block(path); | |
f17722f9 | 1898 | if (b2 == EXT_MAX_BLOCKS) |
25d14f98 | 1899 | goto out; |
f5a44db5 | 1900 | b2 = EXT4_LBLK_CMASK(sbi, b2); |
25d14f98 AA |
1901 | } |
1902 | ||
725d26d3 | 1903 | /* check for wrap through zero on extent logical start block*/ |
25d14f98 | 1904 | if (b1 + len1 < b1) { |
f17722f9 | 1905 | len1 = EXT_MAX_BLOCKS - b1; |
25d14f98 AA |
1906 | newext->ee_len = cpu_to_le16(len1); |
1907 | ret = 1; | |
1908 | } | |
1909 | ||
1910 | /* check for overlap */ | |
1911 | if (b1 + len1 > b2) { | |
1912 | newext->ee_len = cpu_to_le16(b2 - b1); | |
1913 | ret = 1; | |
1914 | } | |
1915 | out: | |
1916 | return ret; | |
1917 | } | |
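
In isolation the trim is simple unsigned arithmetic: if the new extent would wrap past the maximum logical block, or run into the start of the next allocated region, shorten it in place and report the shrink. A toy sketch of that logic (illustrative names; the kernel additionally rounds b2 down to a cluster boundary for bigalloc):

#include <stdio.h>

#define TOY_MAX_BLOCKS 0xffffffffU

static int toy_check_overlap(unsigned *b1, unsigned *len1, unsigned b2)
{
	int ret = 0;

	/* wrap through zero on the logical start block? */
	if (*b1 + *len1 < *b1) {
		*len1 = TOY_MAX_BLOCKS - *b1;
		ret = 1;
	}
	/* running into the next allocated region? */
	if (*b1 + *len1 > b2) {
		*len1 = b2 - *b1;
		ret = 1;
	}
	return ret;
}

int main(void)
{
	unsigned b1 = 100, len1 = 50;

	if (toy_check_overlap(&b1, &len1, 120)) /* next extent at 120 */
		printf("trimmed to %u blocks\n", len1); /* prints 20 */
	return 0;
}
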
1918 | ||
a86c6181 | 1919 | /* |
d0d856e8 | 1920 | * ext4_ext_insert_extent: |
e4d7f2d3 | 1921 | * tries to merge the requested extent into an existing extent or
d0d856e8 RD |
1922 | * inserts the requested extent as a new one into the tree,
1923 | * creating a new leaf in the no-space case.
a86c6181 AT |
1924 | */ |
1925 | int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | |
dfe50809 | 1926 | struct ext4_ext_path **ppath, |
107a7bd3 | 1927 | struct ext4_extent *newext, int gb_flags) |
a86c6181 | 1928 | { |
dfe50809 | 1929 | struct ext4_ext_path *path = *ppath; |
af5bc92d | 1930 | struct ext4_extent_header *eh; |
a86c6181 AT |
1931 | struct ext4_extent *ex, *fex; |
1932 | struct ext4_extent *nearex; /* nearest extent */ | |
1933 | struct ext4_ext_path *npath = NULL; | |
725d26d3 AK |
1934 | int depth, len, err; |
1935 | ext4_lblk_t next; | |
556615dc | 1936 | int mb_flags = 0, unwritten; |
a86c6181 | 1937 | |
e3cf5d5d TT |
1938 | if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
1939 | mb_flags |= EXT4_MB_DELALLOC_RESERVED; | |
273df556 FM |
1940 | if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
1941 | EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); | |
6a797d27 | 1942 | return -EFSCORRUPTED; |
273df556 | 1943 | } |
a86c6181 AT |
1944 | depth = ext_depth(inode); |
1945 | ex = path[depth].p_ext; | |
be8981be | 1946 | eh = path[depth].p_hdr; |
273df556 FM |
1947 | if (unlikely(path[depth].p_hdr == NULL)) { |
1948 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | |
6a797d27 | 1949 | return -EFSCORRUPTED; |
273df556 | 1950 | } |
a86c6181 AT |
1951 | |
1952 | /* try to insert block into found extent and return */ | |
107a7bd3 | 1953 | if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { |
a2df2a63 AA |
1954 | |
1955 | /* | |
be8981be LC |
1956 | * Try to see whether we should rather test the extent to the
1957 | * right of ex, or the one to the left of ex. This is because
ed8a1a76 | 1958 | * ext4_find_extent() can return either the extent on the
be8981be LC |
1959 | * left, or the one on the right of the searched position. This
1960 | * will make merging more effective.
a2df2a63 | 1961 | */ |
be8981be LC |
1962 | if (ex < EXT_LAST_EXTENT(eh) && |
1963 | (le32_to_cpu(ex->ee_block) + | |
1964 | ext4_ext_get_actual_len(ex) < | |
1965 | le32_to_cpu(newext->ee_block))) { | |
1966 | ex += 1; | |
1967 | goto prepend; | |
1968 | } else if ((ex > EXT_FIRST_EXTENT(eh)) && | |
1969 | (le32_to_cpu(newext->ee_block) + | |
1970 | ext4_ext_get_actual_len(newext) < | |
1971 | le32_to_cpu(ex->ee_block))) | |
1972 | ex -= 1; | |
1973 | ||
1974 | /* Try to append newex to the ex */ | |
1975 | if (ext4_can_extents_be_merged(inode, ex, newext)) { | |
70aa1554 | 1976 | ext_debug(inode, "append [%d]%d block to %u:[%d]%d" |
be8981be | 1977 | "(from %llu)\n", |
556615dc | 1978 | ext4_ext_is_unwritten(newext), |
be8981be LC |
1979 | ext4_ext_get_actual_len(newext), |
1980 | le32_to_cpu(ex->ee_block), | |
556615dc | 1981 | ext4_ext_is_unwritten(ex), |
be8981be LC |
1982 | ext4_ext_get_actual_len(ex), |
1983 | ext4_ext_pblock(ex)); | |
1984 | err = ext4_ext_get_access(handle, inode, | |
1985 | path + depth); | |
1986 | if (err) | |
1987 | return err; | |
556615dc | 1988 | unwritten = ext4_ext_is_unwritten(ex); |
be8981be | 1989 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
a2df2a63 | 1990 | + ext4_ext_get_actual_len(newext)); |
556615dc LC |
1991 | if (unwritten) |
1992 | ext4_ext_mark_unwritten(ex); | |
be8981be LC |
1993 | eh = path[depth].p_hdr; |
1994 | nearex = ex; | |
1995 | goto merge; | |
1996 | } | |
1997 | ||
1998 | prepend: | |
1999 | /* Try to prepend newex to the ex */ | |
2000 | if (ext4_can_extents_be_merged(inode, newext, ex)) { | |
70aa1554 | 2001 | ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d" |
be8981be LC |
2002 | "(from %llu)\n", |
2003 | le32_to_cpu(newext->ee_block), | |
556615dc | 2004 | ext4_ext_is_unwritten(newext), |
be8981be LC |
2005 | ext4_ext_get_actual_len(newext), |
2006 | le32_to_cpu(ex->ee_block), | |
556615dc | 2007 | ext4_ext_is_unwritten(ex), |
be8981be LC |
2008 | ext4_ext_get_actual_len(ex), |
2009 | ext4_ext_pblock(ex)); | |
2010 | err = ext4_ext_get_access(handle, inode, | |
2011 | path + depth); | |
2012 | if (err) | |
2013 | return err; | |
2014 | ||
556615dc | 2015 | unwritten = ext4_ext_is_unwritten(ex); |
be8981be LC |
2016 | ex->ee_block = newext->ee_block; |
2017 | ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); | |
2018 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) | |
2019 | + ext4_ext_get_actual_len(newext)); | |
556615dc LC |
2020 | if (unwritten) |
2021 | ext4_ext_mark_unwritten(ex); | |
be8981be LC |
2022 | eh = path[depth].p_hdr; |
2023 | nearex = ex; | |
2024 | goto merge; | |
2025 | } | |
a86c6181 AT |
2026 | } |
2027 | ||
a86c6181 AT |
2028 | depth = ext_depth(inode); |
2029 | eh = path[depth].p_hdr; | |
2030 | if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) | |
2031 | goto has_space; | |
2032 | ||
2033 | /* probably next leaf has space for us? */ | |
2034 | fex = EXT_LAST_EXTENT(eh); | |
598dbdf2 RD |
2035 | next = EXT_MAX_BLOCKS; |
2036 | if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) | |
5718789d | 2037 | next = ext4_ext_next_leaf_block(path); |
598dbdf2 | 2038 | if (next != EXT_MAX_BLOCKS) { |
70aa1554 | 2039 | ext_debug(inode, "next leaf block - %u\n", next); |
a86c6181 | 2040 | BUG_ON(npath != NULL); |
73c384c0 | 2041 | npath = ext4_find_extent(inode, next, NULL, gb_flags); |
a86c6181 AT |
2042 | if (IS_ERR(npath)) |
2043 | return PTR_ERR(npath); | |
2044 | BUG_ON(npath->p_depth != path->p_depth); | |
2045 | eh = npath[depth].p_hdr; | |
2046 | if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { | |
70aa1554 | 2047 | ext_debug(inode, "next leaf isn't full(%d)\n", |
a86c6181 AT |
2048 | le16_to_cpu(eh->eh_entries)); |
2049 | path = npath; | |
ffb505ff | 2050 | goto has_space; |
a86c6181 | 2051 | } |
70aa1554 | 2052 | ext_debug(inode, "next leaf has no free space(%d,%d)\n", |
a86c6181 AT |
2053 | le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
2054 | } | |
2055 | ||
2056 | /* | |
d0d856e8 RD |
2057 | * There is no free space in the found leaf. |
2058 | * We're going to add a new leaf to the tree.
a86c6181 | 2059 | */ |
107a7bd3 | 2060 | if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) |
e3cf5d5d | 2061 | mb_flags |= EXT4_MB_USE_RESERVED; |
107a7bd3 | 2062 | err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, |
dfe50809 | 2063 | ppath, newext); |
a86c6181 AT |
2064 | if (err) |
2065 | goto cleanup; | |
2066 | depth = ext_depth(inode); | |
2067 | eh = path[depth].p_hdr; | |
2068 | ||
2069 | has_space: | |
2070 | nearex = path[depth].p_ext; | |
2071 | ||
7e028976 AM |
2072 | err = ext4_ext_get_access(handle, inode, path + depth); |
2073 | if (err) | |
a86c6181 AT |
2074 | goto cleanup; |
2075 | ||
2076 | if (!nearex) { | |
2077 | /* there is no extent in this leaf, create first one */ | |
70aa1554 | 2078 | ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n", |
8c55e204 | 2079 | le32_to_cpu(newext->ee_block), |
bf89d16f | 2080 | ext4_ext_pblock(newext), |
556615dc | 2081 | ext4_ext_is_unwritten(newext), |
a2df2a63 | 2082 | ext4_ext_get_actual_len(newext)); |
80e675f9 EG |
2083 | nearex = EXT_FIRST_EXTENT(eh); |
2084 | } else { | |
2085 | if (le32_to_cpu(newext->ee_block) | |
8c55e204 | 2086 | > le32_to_cpu(nearex->ee_block)) { |
80e675f9 | 2087 | /* Insert after */ |
70aa1554 | 2088 | ext_debug(inode, "insert %u:%llu:[%d]%d before: " |
32de6756 | 2089 | "nearest %p\n", |
80e675f9 EG |
2090 | le32_to_cpu(newext->ee_block), |
2091 | ext4_ext_pblock(newext), | |
556615dc | 2092 | ext4_ext_is_unwritten(newext), |
80e675f9 EG |
2093 | ext4_ext_get_actual_len(newext), |
2094 | nearex); | |
2095 | nearex++; | |
2096 | } else { | |
2097 | /* Insert before */ | |
2098 | BUG_ON(newext->ee_block == nearex->ee_block); | |
70aa1554 | 2099 | ext_debug(inode, "insert %u:%llu:[%d]%d after: " |
32de6756 | 2100 | "nearest %p\n", |
8c55e204 | 2101 | le32_to_cpu(newext->ee_block), |
bf89d16f | 2102 | ext4_ext_pblock(newext), |
556615dc | 2103 | ext4_ext_is_unwritten(newext), |
a2df2a63 | 2104 | ext4_ext_get_actual_len(newext), |
80e675f9 EG |
2105 | nearex); |
2106 | } | |
2107 | len = EXT_LAST_EXTENT(eh) - nearex + 1; | |
2108 | if (len > 0) { | |
70aa1554 | 2109 | ext_debug(inode, "insert %u:%llu:[%d]%d: " |
80e675f9 EG |
2110 | "move %d extents from 0x%p to 0x%p\n", |
2111 | le32_to_cpu(newext->ee_block), | |
2112 | ext4_ext_pblock(newext), | |
556615dc | 2113 | ext4_ext_is_unwritten(newext), |
80e675f9 EG |
2114 | ext4_ext_get_actual_len(newext), |
2115 | len, nearex, nearex + 1); | |
2116 | memmove(nearex + 1, nearex, | |
2117 | len * sizeof(struct ext4_extent)); | |
a86c6181 | 2118 | } |
a86c6181 AT |
2119 | } |
2120 | ||
e8546d06 | 2121 | le16_add_cpu(&eh->eh_entries, 1); |
80e675f9 | 2122 | path[depth].p_ext = nearex; |
a86c6181 | 2123 | nearex->ee_block = newext->ee_block; |
bf89d16f | 2124 | ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); |
a86c6181 | 2125 | nearex->ee_len = newext->ee_len; |
a86c6181 AT |
2126 | |
2127 | merge: | |
e7bcf823 | 2128 | /* try to merge extents */ |
107a7bd3 | 2129 | if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) |
ecb94f5f | 2130 | ext4_ext_try_to_merge(handle, inode, path, nearex); |
a86c6181 | 2131 | |
a86c6181 AT |
2132 | |
2133 | /* time to correct all indexes above */ | |
2134 | err = ext4_ext_correct_indexes(handle, inode, path); | |
2135 | if (err) | |
2136 | goto cleanup; | |
2137 | ||
ecb94f5f | 2138 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
a86c6181 AT |
2139 | |
2140 | cleanup: | |
b7ea89ad TT |
2141 | ext4_ext_drop_refs(npath); |
2142 | kfree(npath); | |
a86c6181 AT |
2143 | return err; |
2144 | } | |
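
Stripped of journaling, merging and tree growth, the leaf insert itself is ordered-array insertion: locate the slot, shift the tail one entry to the right, and store the new extent. A minimal sketch of that core step (toy types; memmove rather than memcpy because the regions overlap, exactly as in the function above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_extent { uint32_t lblk; uint64_t pblk; uint16_t len; };

static void toy_insert(struct toy_extent *leaf, int *nr,
		       const struct toy_extent *newext)
{
	int pos = 0;

	while (pos < *nr && leaf[pos].lblk < newext->lblk)
		pos++;                            /* find the slot */
	memmove(leaf + pos + 1, leaf + pos,
		(*nr - pos) * sizeof(*leaf));     /* open it up */
	leaf[pos] = *newext;
	(*nr)++;
}

int main(void)
{
	struct toy_extent leaf[8] = { { 0, 500, 4 }, { 100, 900, 8 } };
	struct toy_extent newext = { 10, 600, 4 };
	int nr = 2;

	toy_insert(leaf, &nr, &newext);
	for (int i = 0; i < nr; i++)              /* 0, 10, 100 in order */
		printf("%u:%llu:%u\n", (unsigned)leaf[i].lblk,
		       (unsigned long long)leaf[i].pblk,
		       (unsigned)leaf[i].len);
	return 0;
}
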
2145 | ||
bb5835ed TT |
2146 | static int ext4_fill_es_cache_info(struct inode *inode, |
2147 | ext4_lblk_t block, ext4_lblk_t num, | |
2148 | struct fiemap_extent_info *fieinfo) | |
2149 | { | |
2150 | ext4_lblk_t next, end = block + num - 1; | |
2151 | struct extent_status es; | |
2152 | unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; | |
2153 | unsigned int flags; | |
2154 | int err; | |
2155 | ||
2156 | while (block <= end) { | |
2157 | next = 0; | |
2158 | flags = 0; | |
2159 | if (!ext4_es_lookup_extent(inode, block, &next, &es)) | |
2160 | break; | |
2161 | if (ext4_es_is_unwritten(&es)) | |
2162 | flags |= FIEMAP_EXTENT_UNWRITTEN; | |
2163 | if (ext4_es_is_delayed(&es)) | |
2164 | flags |= (FIEMAP_EXTENT_DELALLOC | | |
2165 | FIEMAP_EXTENT_UNKNOWN); | |
2166 | if (ext4_es_is_hole(&es)) | |
2167 | flags |= EXT4_FIEMAP_EXTENT_HOLE; | |
2168 | if (next == 0) | |
2169 | flags |= FIEMAP_EXTENT_LAST; | |
2170 | if (flags & (FIEMAP_EXTENT_DELALLOC| | |
2171 | EXT4_FIEMAP_EXTENT_HOLE)) | |
2172 | es.es_pblk = 0; | |
2173 | else | |
2174 | es.es_pblk = ext4_es_pblock(&es); | |
2175 | err = fiemap_fill_next_extent(fieinfo, | |
2176 | (__u64)es.es_lblk << blksize_bits, | |
2177 | (__u64)es.es_pblk << blksize_bits, | |
2178 | (__u64)es.es_len << blksize_bits, | |
2179 | flags); | |
2180 | if (next == 0) | |
2181 | break; | |
2182 | block = next; | |
2183 | if (err < 0) | |
2184 | return err; | |
2185 | if (err == 1) | |
2186 | return 0; | |
2187 | } | |
2188 | return 0; | |
2189 | } | |
2190 | ||
2191 | ||
a86c6181 | 2192 | /* |
140a5250 JK |
2193 | * ext4_ext_determine_hole - determine hole around given block |
2194 | * @inode: inode we lookup in | |
2195 | * @path: path in extent tree to @lblk | |
2196 | * @lblk: pointer to logical block around which we want to determine hole | |
2197 | * | |
2198 | * Determine the hole length (and its start, if easily possible) around the
2199 | * given logical block. We don't try too hard to find the beginning of the
2200 | * hole, but if @path happens to point to the extent before @lblk, we provide it.
2201 | * | |
2202 | * The function returns the length of a hole starting at @lblk. We update @lblk | |
2203 | * to the beginning of the hole if we managed to find it. | |
a86c6181 | 2204 | */ |
140a5250 JK |
2205 | static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, |
2206 | struct ext4_ext_path *path, | |
2207 | ext4_lblk_t *lblk) | |
a86c6181 AT |
2208 | { |
2209 | int depth = ext_depth(inode); | |
a86c6181 | 2210 | struct ext4_extent *ex; |
140a5250 | 2211 | ext4_lblk_t len; |
a86c6181 AT |
2212 | |
2213 | ex = path[depth].p_ext; | |
2214 | if (ex == NULL) { | |
2f8e0a7c | 2215 | /* there is no extent yet, so gap is [0;-] */ |
140a5250 | 2216 | *lblk = 0; |
2f8e0a7c | 2217 | len = EXT_MAX_BLOCKS; |
140a5250 JK |
2218 | } else if (*lblk < le32_to_cpu(ex->ee_block)) { |
2219 | len = le32_to_cpu(ex->ee_block) - *lblk; | |
2220 | } else if (*lblk >= le32_to_cpu(ex->ee_block) | |
a2df2a63 | 2221 | + ext4_ext_get_actual_len(ex)) { |
725d26d3 | 2222 | ext4_lblk_t next; |
725d26d3 | 2223 | |
140a5250 | 2224 | *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); |
725d26d3 | 2225 | next = ext4_ext_next_allocated_block(path); |
140a5250 JK |
2226 | BUG_ON(next == *lblk); |
2227 | len = next - *lblk; | |
a86c6181 | 2228 | } else { |
a86c6181 AT |
2229 | BUG(); |
2230 | } | |
140a5250 JK |
2231 | return len; |
2232 | } | |
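
The three cases reduce to a small pure function: no extent at all (the hole is the whole logical range), the target before the found extent (the hole ends where the extent starts), or the target past it (the hole runs from the extent's end to the next allocated block). A sketch under those assumptions, where 'next' stands in for ext4_ext_next_allocated_block():

#include <stdio.h>

#define TOY_MAX_BLOCKS 0xffffffffU

struct toy_extent { unsigned lblk; unsigned len; };

/* callers only ask about blocks that are actually in a hole, so the
 * "lblk inside the extent" case cannot occur (the kernel BUGs on it) */
static unsigned toy_hole_len(const struct toy_extent *ex, unsigned next,
			     unsigned *lblk)
{
	if (ex == NULL) {           /* no extent yet: gap is [0;-] */
		*lblk = 0;
		return TOY_MAX_BLOCKS;
	}
	if (*lblk < ex->lblk)       /* hole ends where the extent starts */
		return ex->lblk - *lblk;
	*lblk = ex->lblk + ex->len; /* hole starts right after the extent */
	return next - *lblk;
}

int main(void)
{
	struct toy_extent ex = { 100, 8 }; /* covers blocks 100..107 */
	unsigned lblk = 300, len;

	len = toy_hole_len(&ex, 1000, &lblk); /* next allocation at 1000 */
	printf("hole %u..%u\n", lblk, lblk + len - 1); /* 108..999 */
	return 0;
}
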
a86c6181 | 2233 | |
140a5250 JK |
2234 | /* |
2235 | * ext4_ext_put_gap_in_cache: | |
2236 | * calculate boundaries of the gap that the requested block fits into | |
2237 | * and cache this gap | |
2238 | */ | |
2239 | static void | |
2240 | ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, | |
2241 | ext4_lblk_t hole_len) | |
2242 | { | |
2243 | struct extent_status es; | |
2244 | ||
ad431025 EW |
2245 | ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, |
2246 | hole_start + hole_len - 1, &es); | |
2f8e0a7c ZL |
2247 | if (es.es_len) { |
2248 | /* Is there a delayed extent containing lblock? */
140a5250 | 2249 | if (es.es_lblk <= hole_start) |
2f8e0a7c | 2250 | return; |
140a5250 | 2251 | hole_len = min(es.es_lblk - hole_start, hole_len); |
2f8e0a7c | 2252 | } |
70aa1554 | 2253 | ext_debug(inode, " -> %u:%u\n", hole_start, hole_len); |
140a5250 JK |
2254 | ext4_es_insert_extent(inode, hole_start, hole_len, ~0, |
2255 | EXTENT_STATUS_HOLE); | |
a86c6181 AT |
2256 | } |
2257 | ||
2258 | /* | |
d0d856e8 RD |
2259 | * ext4_ext_rm_idx: |
2260 | * removes index from the index block. | |
a86c6181 | 2261 | */ |
1d03ec98 | 2262 | static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, |
c36575e6 | 2263 | struct ext4_ext_path *path, int depth) |
a86c6181 | 2264 | { |
a86c6181 | 2265 | int err; |
f65e6fba | 2266 | ext4_fsblk_t leaf; |
a86c6181 AT |
2267 | |
2268 | /* free index block */ | |
c36575e6 FL |
2269 | depth--; |
2270 | path = path + depth; | |
bf89d16f | 2271 | leaf = ext4_idx_pblock(path->p_idx); |
273df556 FM |
2272 | if (unlikely(path->p_hdr->eh_entries == 0)) { |
2273 | EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); | |
6a797d27 | 2274 | return -EFSCORRUPTED; |
273df556 | 2275 | } |
7e028976 AM |
2276 | err = ext4_ext_get_access(handle, inode, path); |
2277 | if (err) | |
a86c6181 | 2278 | return err; |
0e1147b0 RD |
2279 | |
2280 | if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { | |
2281 | int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; | |
2282 | len *= sizeof(struct ext4_extent_idx); | |
2283 | memmove(path->p_idx, path->p_idx + 1, len); | |
2284 | } | |
2285 | ||
e8546d06 | 2286 | le16_add_cpu(&path->p_hdr->eh_entries, -1); |
7e028976 AM |
2287 | err = ext4_ext_dirty(handle, inode, path); |
2288 | if (err) | |
a86c6181 | 2289 | return err; |
70aa1554 | 2290 | ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf); |
d8990240 AK |
2291 | trace_ext4_ext_rm_idx(inode, leaf); |
2292 | ||
7dc57615 | 2293 | ext4_free_blocks(handle, inode, NULL, leaf, 1, |
e6362609 | 2294 | EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
c36575e6 FL |
2295 | |
2296 | while (--depth >= 0) { | |
2297 | if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) | |
2298 | break; | |
2299 | path--; | |
2300 | err = ext4_ext_get_access(handle, inode, path); | |
2301 | if (err) | |
2302 | break; | |
2303 | path->p_idx->ei_block = (path+1)->p_idx->ei_block; | |
2304 | err = ext4_ext_dirty(handle, inode, path); | |
2305 | if (err) | |
2306 | break; | |
2307 | } | |
a86c6181 AT |
2308 | return err; |
2309 | } | |
2310 | ||
2311 | /* | |
ee12b630 MC |
2312 | * ext4_ext_calc_credits_for_single_extent: |
2313 | * This routine returns the maximum number of credits needed to insert
2314 | * an extent into the extent tree.
2315 | * When passing the actual path, the caller should calculate credits
2316 | * under i_data_sem.
a86c6181 | 2317 | */ |
525f4ed8 | 2318 | int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, |
a86c6181 AT |
2319 | struct ext4_ext_path *path) |
2320 | { | |
a86c6181 | 2321 | if (path) { |
ee12b630 | 2322 | int depth = ext_depth(inode); |
f3bd1f3f | 2323 | int ret = 0; |
ee12b630 | 2324 | |
a86c6181 | 2325 | /* probably there is space in leaf? */ |
a86c6181 | 2326 | if (le16_to_cpu(path[depth].p_hdr->eh_entries) |
ee12b630 | 2327 | < le16_to_cpu(path[depth].p_hdr->eh_max)) { |
a86c6181 | 2328 | |
ee12b630 MC |
2329 | /* |
2330 | * There is some space in the leaf, so there is no
2331 | * need to account for the leaf block credit.
2332 | * | |
2333 | * bitmaps and block group descriptor blocks | |
df3ab170 | 2334 | * and other metadata blocks still need to be |
ee12b630 MC |
2335 | * accounted for.
2336 | */ | |
525f4ed8 | 2337 | /* 1 bitmap, 1 block group descriptor */ |
ee12b630 | 2338 | ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); |
5887e98b | 2339 | return ret; |
ee12b630 MC |
2340 | } |
2341 | } | |
a86c6181 | 2342 | |
525f4ed8 | 2343 | return ext4_chunk_trans_blocks(inode, nrblocks); |
ee12b630 | 2344 | } |
a86c6181 | 2345 | |
ee12b630 | 2346 | /* |
fffb2739 | 2347 | * How many index/leaf blocks need to be changed/allocated to add @extents extents?
ee12b630 | 2348 | * |
fffb2739 JK |
2349 | * If we add a single extent, then in the worst case each tree level's
2350 | * index/leaf may need to be changed if the tree splits.
ee12b630 | 2351 | * |
fffb2739 JK |
2352 | * If more extents are inserted, they could cause the whole tree to split
2353 | * more than once, but this is really rare.
ee12b630 | 2354 | */ |
fffb2739 | 2355 | int ext4_ext_index_trans_blocks(struct inode *inode, int extents) |
ee12b630 MC |
2356 | { |
2357 | int index; | |
f19d5870 TM |
2358 | int depth; |
2359 | ||
2360 | /* If we are converting the inline data, only one is needed here. */ | |
2361 | if (ext4_has_inline_data(inode)) | |
2362 | return 1; | |
2363 | ||
2364 | depth = ext_depth(inode); | |
a86c6181 | 2365 | |
fffb2739 | 2366 | if (extents <= 1) |
ee12b630 MC |
2367 | index = depth * 2; |
2368 | else | |
2369 | index = depth * 3; | |
a86c6181 | 2370 | |
ee12b630 | 2371 | return index; |
a86c6181 AT |
2372 | } |
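
Worked numbers, for concreteness: with a tree of depth 2, budgeting for a single extent gives 2 * 2 = 4 index/leaf blocks (each level may be both changed and split once), while a multi-extent insert is budgeted at 2 * 3 = 6 to allow for the rare case of the tree splitting more than once.
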
2373 | ||
981250ca TT |
2374 | static inline int get_default_free_blocks_flags(struct inode *inode) |
2375 | { | |
ddfa17e4 TE |
2376 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || |
2377 | ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) | |
981250ca TT |
2378 | return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; |
2379 | else if (ext4_should_journal_data(inode)) | |
2380 | return EXT4_FREE_BLOCKS_FORGET; | |
2381 | return 0; | |
2382 | } | |
2383 | ||
9fe67149 EW |
2384 | /* |
2385 | * ext4_rereserve_cluster - increment the reserved cluster count when | |
2386 | * freeing a cluster with a pending reservation | |
2387 | * | |
2388 | * @inode - file containing the cluster | |
2389 | * @lblk - logical block in cluster to be reserved | |
2390 | * | |
2391 | * Increments the reserved cluster count and adjusts quota in a bigalloc | |
2392 | * file system when freeing a partial cluster containing at least one | |
2393 | * delayed and unwritten block. A partial cluster meeting that | |
2394 | * requirement will have a pending reservation. If so, the | |
2395 | * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to | |
2396 | * defer reserved and allocated space accounting to a subsequent call | |
2397 | * to this function. | |
2398 | */ | |
2399 | static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) | |
2400 | { | |
2401 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | |
2402 | struct ext4_inode_info *ei = EXT4_I(inode); | |
2403 | ||
2404 | dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); | |
2405 | ||
2406 | spin_lock(&ei->i_block_reservation_lock); | |
2407 | ei->i_reserved_data_blocks++; | |
2408 | percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); | |
2409 | spin_unlock(&ei->i_block_reservation_lock); | |
2410 | ||
2411 | percpu_counter_add(&sbi->s_freeclusters_counter, 1); | |
2412 | ext4_remove_pending(inode, lblk); | |
2413 | } | |
2414 | ||
a86c6181 | 2415 | static int ext4_remove_blocks(handle_t *handle, struct inode *inode, |
0aa06000 | 2416 | struct ext4_extent *ex, |
9fe67149 | 2417 | struct partial_cluster *partial, |
0aa06000 | 2418 | ext4_lblk_t from, ext4_lblk_t to) |
a86c6181 | 2419 | { |
0aa06000 | 2420 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
345ee947 | 2421 | unsigned short ee_len = ext4_ext_get_actual_len(ex); |
9fe67149 EW |
2422 | ext4_fsblk_t last_pblk, pblk; |
2423 | ext4_lblk_t num; | |
2424 | int flags; | |
2425 | ||
2426 | /* only extent tail removal is allowed */ | |
2427 | if (from < le32_to_cpu(ex->ee_block) || | |
2428 | to != le32_to_cpu(ex->ee_block) + ee_len - 1) { | |
2429 | ext4_error(sbi->s_sb, | |
2430 | "strange request: removal(2) %u-%u from %u:%u", | |
2431 | from, to, le32_to_cpu(ex->ee_block), ee_len); | |
2432 | return 0; | |
2433 | } | |
2434 | ||
2435 | #ifdef EXTENTS_STATS | |
2436 | spin_lock(&sbi->s_ext_stats_lock); | |
2437 | sbi->s_ext_blocks += ee_len; | |
2438 | sbi->s_ext_extents++; | |
2439 | if (ee_len < sbi->s_ext_min) | |
2440 | sbi->s_ext_min = ee_len; | |
2441 | if (ee_len > sbi->s_ext_max) | |
2442 | sbi->s_ext_max = ee_len; | |
2443 | if (ext_depth(inode) > sbi->s_depth_max) | |
2444 | sbi->s_depth_max = ext_depth(inode); | |
2445 | spin_unlock(&sbi->s_ext_stats_lock); | |
2446 | #endif | |
2447 | ||
2448 | trace_ext4_remove_blocks(inode, ex, from, to, partial); | |
18888cf0 | 2449 | |
0aa06000 | 2450 | /* |
9fe67149 EW |
2451 | * if we have a partial cluster, and it's different from the |
2452 | * cluster of the last block in the extent, we free it | |
0aa06000 | 2453 | */ |
9fe67149 EW |
2454 | last_pblk = ext4_ext_pblock(ex) + ee_len - 1; |
2455 | ||
2456 | if (partial->state != initial && | |
2457 | partial->pclu != EXT4_B2C(sbi, last_pblk)) { | |
2458 | if (partial->state == tofree) { | |
2459 | flags = get_default_free_blocks_flags(inode); | |
2460 | if (ext4_is_pending(inode, partial->lblk)) | |
2461 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
2462 | ext4_free_blocks(handle, inode, NULL, | |
2463 | EXT4_C2B(sbi, partial->pclu), | |
2464 | sbi->s_cluster_ratio, flags); | |
2465 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) | |
2466 | ext4_rereserve_cluster(inode, partial->lblk); | |
2467 | } | |
2468 | partial->state = initial; | |
2469 | } | |
2470 | ||
2471 | num = le32_to_cpu(ex->ee_block) + ee_len - from; | |
2472 | pblk = ext4_ext_pblock(ex) + ee_len - num; | |
0aa06000 TT |
2473 | |
2474 | /* | |
9fe67149 EW |
2475 | * We free the partial cluster at the end of the extent (if any), |
2476 | * unless the cluster is used by another extent (partial_cluster | |
2477 | * state is nofree). If a partial cluster exists here, it must be | |
2478 | * shared with the last block in the extent. | |
0aa06000 | 2479 | */ |
9fe67149 EW |
2480 | flags = get_default_free_blocks_flags(inode); |
2481 | ||
2482 | /* partial, left end cluster aligned, right end unaligned */ | |
2483 | if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) && | |
2484 | (EXT4_LBLK_CMASK(sbi, to) >= from) && | |
2485 | (partial->state != nofree)) { | |
2486 | if (ext4_is_pending(inode, to)) | |
2487 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
0aa06000 | 2488 | ext4_free_blocks(handle, inode, NULL, |
9fe67149 | 2489 | EXT4_PBLK_CMASK(sbi, last_pblk), |
0aa06000 | 2490 | sbi->s_cluster_ratio, flags); |
9fe67149 EW |
2491 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) |
2492 | ext4_rereserve_cluster(inode, to); | |
2493 | partial->state = initial; | |
2494 | flags = get_default_free_blocks_flags(inode); | |
0aa06000 TT |
2495 | } |
2496 | ||
9fe67149 | 2497 | flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; |
d23142c6 | 2498 | |
9fe67149 EW |
2499 | /* |
2500 | * For bigalloc file systems, we never free a partial cluster | |
2501 | * at the beginning of the extent. Instead, we check to see if we | |
2502 | * need to free it on a subsequent call to ext4_remove_blocks, | |
2503 | * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. | |
2504 | */ | |
2505 | flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; | |
2506 | ext4_free_blocks(handle, inode, NULL, pblk, num, flags); | |
2507 | ||
2508 | /* reset the partial cluster if we've freed past it */ | |
2509 | if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk)) | |
2510 | partial->state = initial; | |
2511 | ||
2512 | /* | |
2513 | * If we've freed the entire extent but the beginning is not left | |
2514 | * cluster aligned and is not marked as ineligible for freeing we | |
2515 | * record the partial cluster at the beginning of the extent. It | |
2516 | * wasn't freed by the preceding ext4_free_blocks() call, and we | |
2517 | * need to look farther to the left to determine if it's to be freed | |
2518 | * (not shared with another extent). Else, reset the partial | |
2519 | * cluster - we're either done freeing or the beginning of the | |
2520 | * extent is left cluster aligned. | |
2521 | */ | |
2522 | if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { | |
2523 | if (partial->state == initial) { | |
2524 | partial->pclu = EXT4_B2C(sbi, pblk); | |
2525 | partial->lblk = from; | |
2526 | partial->state = tofree; | |
345ee947 | 2527 | } |
9fe67149 EW |
2528 | } else { |
2529 | partial->state = initial; | |
2530 | } | |
2531 | ||
a86c6181 AT |
2532 | return 0; |
2533 | } | |
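
The cluster arithmetic driving all of the partial-cluster bookkeeping above is worth seeing on its own: with 2^n blocks per cluster, B2C maps a block to its cluster number, COFF gives the offset within the cluster, and CMASK rounds down to the cluster boundary; an edge block with a nonzero offset marks a partial cluster that may be shared with a neighbouring extent. A toy sketch mirroring those semantics (illustrative macros, not the kernel's):

#include <stdio.h>

#define TOY_CLUSTER_BITS  2                      /* 4 blocks per cluster */
#define TOY_CLUSTER_RATIO (1u << TOY_CLUSTER_BITS)

#define TOY_B2C(b)   ((b) >> TOY_CLUSTER_BITS)
#define TOY_COFF(b)  ((b) & (TOY_CLUSTER_RATIO - 1))
#define TOY_CMASK(b) ((b) & ~(TOY_CLUSTER_RATIO - 1))

int main(void)
{
	unsigned from = 10, to = 17;  /* block range being removed */

	printf("from: cluster %u offset %u\n", TOY_B2C(from), TOY_COFF(from));
	printf("to:   cluster %u offset %u\n", TOY_B2C(to), TOY_COFF(to));
	/* 'from' has offset 2, so cluster 2 is partial on the left edge;
	 * whether it can be freed depends on its other blocks -- that is
	 * the decision partial->state defers. */
	printf("left edge cluster starts at block %u\n", TOY_CMASK(from));
	return 0;
}
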
2534 | ||
d583fb87 AH |
2535 | /* |
2536 | * ext4_ext_rm_leaf() Removes the extents associated with the | |
5bf43760 EW |
2537 | * blocks appearing between "start" and "end". Both "start" |
2538 | * and "end" must appear in the same extent or EIO is returned. | |
d583fb87 AH |
2539 | * |
2540 | * @handle: The journal handle | |
2541 | * @inode: The files inode | |
2542 | * @path: The path to the leaf | |
d23142c6 | 2543 | * @partial_cluster: The cluster which we'll have to free if all extents |
5bf43760 EW |
2544 | * has been released from it. However, if this value is |
2545 | * negative, it's a cluster just to the right of the | |
2546 | * punched region and it must not be freed. | |
d583fb87 AH |
2547 | * @start: The first block to remove |
2548 | * @end: The last block to remove | |
2549 | */ | |
a86c6181 AT |
2550 | static int |
2551 | ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |
d23142c6 | 2552 | struct ext4_ext_path *path, |
9fe67149 | 2553 | struct partial_cluster *partial, |
0aa06000 | 2554 | ext4_lblk_t start, ext4_lblk_t end) |
a86c6181 | 2555 | { |
0aa06000 | 2556 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
a86c6181 | 2557 | int err = 0, correct_index = 0; |
83448bdf | 2558 | int depth = ext_depth(inode), credits, revoke_credits; |
a86c6181 | 2559 | struct ext4_extent_header *eh; |
750c9c47 | 2560 | ext4_lblk_t a, b; |
725d26d3 AK |
2561 | unsigned num; |
2562 | ext4_lblk_t ex_ee_block; | |
a86c6181 | 2563 | unsigned short ex_ee_len; |
556615dc | 2564 | unsigned unwritten = 0; |
a86c6181 | 2565 | struct ext4_extent *ex; |
d23142c6 | 2566 | ext4_fsblk_t pblk; |
a86c6181 | 2567 | |
c29c0ae7 | 2568 | /* the header must have been checked already in ext4_ext_remove_space() */
70aa1554 | 2569 | ext_debug(inode, "truncate since %u in leaf to %u\n", start, end); |
a86c6181 AT |
2570 | if (!path[depth].p_hdr) |
2571 | path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); | |
2572 | eh = path[depth].p_hdr; | |
273df556 FM |
2573 | if (unlikely(path[depth].p_hdr == NULL)) { |
2574 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | |
6a797d27 | 2575 | return -EFSCORRUPTED; |
273df556 | 2576 | } |
a86c6181 | 2577 | /* find where to start removing */ |
6ae06ff5 AS |
2578 | ex = path[depth].p_ext; |
2579 | if (!ex) | |
2580 | ex = EXT_LAST_EXTENT(eh); | |
a86c6181 AT |
2581 | |
2582 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
a2df2a63 | 2583 | ex_ee_len = ext4_ext_get_actual_len(ex); |
a86c6181 | 2584 | |
9fe67149 | 2585 | trace_ext4_ext_rm_leaf(inode, start, ex, partial); |
d8990240 | 2586 | |
a86c6181 AT |
2587 | while (ex >= EXT_FIRST_EXTENT(eh) && |
2588 | ex_ee_block + ex_ee_len > start) { | |
a41f2071 | 2589 | |
556615dc LC |
2590 | if (ext4_ext_is_unwritten(ex)) |
2591 | unwritten = 1; | |
a41f2071 | 2592 | else |
556615dc | 2593 | unwritten = 0; |
a41f2071 | 2594 | |
70aa1554 | 2595 | ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block, |
556615dc | 2596 | unwritten, ex_ee_len); |
a86c6181 AT |
2597 | path[depth].p_ext = ex; |
2598 | ||
2599 | a = ex_ee_block > start ? ex_ee_block : start; | |
d583fb87 AH |
2600 | b = ex_ee_block+ex_ee_len - 1 < end ? |
2601 | ex_ee_block+ex_ee_len - 1 : end; | |
a86c6181 | 2602 | |
70aa1554 | 2603 | ext_debug(inode, " border %u:%u\n", a, b); |
a86c6181 | 2604 | |
d583fb87 | 2605 | /* If this extent is beyond the end of the hole, skip it */ |
5f95d21f | 2606 | if (end < ex_ee_block) { |
d23142c6 LC |
2607 | /* |
2608 | * We're going to skip this extent and move to another, | |
f4226d9e EW |
2609 | * so note that its first cluster is in use to avoid |
2610 | * freeing it when removing blocks. Eventually, the | |
2611 | * right edge of the truncated/punched region will | |
2612 | * be just to the left. | |
d23142c6 | 2613 | */ |
f4226d9e EW |
2614 | if (sbi->s_cluster_ratio > 1) { |
2615 | pblk = ext4_ext_pblock(ex); | |
9fe67149 EW |
2616 | partial->pclu = EXT4_B2C(sbi, pblk); |
2617 | partial->state = nofree; | |
f4226d9e | 2618 | } |
d583fb87 AH |
2619 | ex--; |
2620 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
2621 | ex_ee_len = ext4_ext_get_actual_len(ex); | |
2622 | continue; | |
750c9c47 | 2623 | } else if (b != ex_ee_block + ex_ee_len - 1) { |
dc1841d6 LC |
2624 | EXT4_ERROR_INODE(inode, |
2625 | "can not handle truncate %u:%u " | |
2626 | "on extent %u:%u", | |
2627 | start, end, ex_ee_block, | |
2628 | ex_ee_block + ex_ee_len - 1); | |
6a797d27 | 2629 | err = -EFSCORRUPTED; |
750c9c47 | 2630 | goto out; |
a86c6181 AT |
2631 | } else if (a != ex_ee_block) { |
2632 | /* remove tail of the extent */ | |
750c9c47 | 2633 | num = a - ex_ee_block; |
a86c6181 AT |
2634 | } else { |
2635 | /* remove whole extent: excellent! */ | |
a86c6181 | 2636 | num = 0; |
a86c6181 | 2637 | } |
34071da7 TT |
2638 | /* |
2639 | * 3 for leaf, sb, and inode plus 2 (bmap and group | |
2640 | * descriptor) for each block group; assume two block | |
2641 | * groups plus ex_ee_len/blocks_per_block_group for | |
2642 | * the worst case | |
2643 | */ | |
2644 | credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); | |
a86c6181 AT |
2645 | if (ex == EXT_FIRST_EXTENT(eh)) { |
2646 | correct_index = 1; | |
2647 | credits += (ext_depth(inode)) + 1; | |
2648 | } | |
5aca07eb | 2649 | credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
83448bdf JK |
2650 | /* |
2651 | * We may end up freeing some index blocks and data from the | |
2652 | * punched range. Note that partial clusters are accounted for | |
2653 | * by ext4_free_data_revoke_credits(). | |
2654 | */ | |
2655 | revoke_credits = | |
2656 | ext4_free_metadata_revoke_credits(inode->i_sb, | |
2657 | ext_depth(inode)) + | |
2658 | ext4_free_data_revoke_credits(inode, b - a + 1); | |
a86c6181 | 2659 | |
a4130367 | 2660 | err = ext4_datasem_ensure_credits(handle, inode, credits, |
83448bdf | 2661 | credits, revoke_credits); |
a4130367 JK |
2662 | if (err) { |
2663 | if (err > 0) | |
2664 | err = -EAGAIN; | |
a86c6181 | 2665 | goto out; |
a4130367 | 2666 | } |
a86c6181 AT |
2667 | |
2668 | err = ext4_ext_get_access(handle, inode, path + depth); | |
2669 | if (err) | |
2670 | goto out; | |
2671 | ||
9fe67149 | 2672 | err = ext4_remove_blocks(handle, inode, ex, partial, a, b); |
a86c6181 AT |
2673 | if (err) |
2674 | goto out; | |
2675 | ||
750c9c47 | 2676 | if (num == 0) |
d0d856e8 | 2677 | /* this extent is removed; mark slot entirely unused */ |
f65e6fba | 2678 | ext4_ext_store_pblock(ex, 0); |
a86c6181 | 2679 | |
a86c6181 | 2680 | ex->ee_len = cpu_to_le16(num); |
749269fa | 2681 | /* |
556615dc | 2682 | * Do not mark unwritten if all the blocks in the |
749269fa AA |
2683 | * extent have been removed. |
2684 | */ | |
556615dc LC |
2685 | if (unwritten && num) |
2686 | ext4_ext_mark_unwritten(ex); | |
d583fb87 AH |
2687 | /* |
2688 | * If the extent was completely released, | |
2689 | * we need to remove it from the leaf | |
2690 | */ | |
2691 | if (num == 0) { | |
f17722f9 | 2692 | if (end != EXT_MAX_BLOCKS - 1) { |
d583fb87 AH |
2693 | /* |
2694 | * For hole punching, we need to scoot all the | |
2695 | * extents up when an extent is removed so that | |
2696 | * we don't have blank extents in the middle
2697 | */ | |
2698 | memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * | |
2699 | sizeof(struct ext4_extent)); | |
2700 | ||
2701 | /* Now get rid of the one at the end */ | |
2702 | memset(EXT_LAST_EXTENT(eh), 0, | |
2703 | sizeof(struct ext4_extent)); | |
2704 | } | |
2705 | le16_add_cpu(&eh->eh_entries, -1); | |
5bf43760 | 2706 | } |
d583fb87 | 2707 | |
750c9c47 DM |
2708 | err = ext4_ext_dirty(handle, inode, path + depth); |
2709 | if (err) | |
2710 | goto out; | |
2711 | ||
70aa1554 | 2712 | ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num, |
bf89d16f | 2713 | ext4_ext_pblock(ex)); |
a86c6181 AT |
2714 | ex--; |
2715 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
a2df2a63 | 2716 | ex_ee_len = ext4_ext_get_actual_len(ex); |
a86c6181 AT |
2717 | } |
2718 | ||
2719 | if (correct_index && eh->eh_entries) | |
2720 | err = ext4_ext_correct_indexes(handle, inode, path); | |
2721 | ||
0aa06000 | 2722 | /* |
ad6599ab EW |
2723 | * If there's a partial cluster and at least one extent remains in |
2724 | * the leaf, free the partial cluster if it isn't shared with the | |
5bf43760 | 2725 | * current extent. If it is shared with the current extent |
9fe67149 | 2726 | * we reset the partial cluster because we've reached the start of the |
5bf43760 | 2727 | * truncated/punched region and we're done removing blocks. |
0aa06000 | 2728 | */ |
9fe67149 | 2729 | if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { |
5bf43760 | 2730 | pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; |
9fe67149 EW |
2731 | if (partial->pclu != EXT4_B2C(sbi, pblk)) { |
2732 | int flags = get_default_free_blocks_flags(inode); | |
2733 | ||
2734 | if (ext4_is_pending(inode, partial->lblk)) | |
2735 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
5bf43760 | 2736 | ext4_free_blocks(handle, inode, NULL, |
9fe67149 EW |
2737 | EXT4_C2B(sbi, partial->pclu), |
2738 | sbi->s_cluster_ratio, flags); | |
2739 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) | |
2740 | ext4_rereserve_cluster(inode, partial->lblk); | |
5bf43760 | 2741 | } |
9fe67149 | 2742 | partial->state = initial; |
0aa06000 TT |
2743 | } |
2744 | ||
a86c6181 AT |
2745 | /* if this leaf is free, then we should |
2746 | * remove it from index block above */ | |
2747 | if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) | |
c36575e6 | 2748 | err = ext4_ext_rm_idx(handle, inode, path, depth); |
a86c6181 AT |
2749 | |
2750 | out: | |
2751 | return err; | |
2752 | } | |
2753 | ||
2754 | /* | |
d0d856e8 RD |
2755 | * ext4_ext_more_to_rm: |
2756 | * returns 1 if current index has to be freed (even partial) | |
a86c6181 | 2757 | */ |
09b88252 | 2758 | static int |
a86c6181 AT |
2759 | ext4_ext_more_to_rm(struct ext4_ext_path *path) |
2760 | { | |
2761 | BUG_ON(path->p_idx == NULL); | |
2762 | ||
2763 | if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) | |
2764 | return 0; | |
2765 | ||
2766 | /* | |
d0d856e8 | 2767 | * if a truncate at a deeper level happened, it wasn't partial,
a86c6181 AT |
2768 | * so we have to consider the current index for truncation
2769 | */ | |
2770 | if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) | |
2771 | return 0; | |
2772 | return 1; | |
2773 | } | |
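
ext4_ext_remove_space() below drives the whole teardown as an explicit, iterative depth-first traversal, with the path array doubling as the stack: descend while an index level still has children to visit, otherwise free the level and pop. A toy sketch of that control shape (visiting children left to right for simplicity, where the kernel works from the right; no journaling or real freeing):

#include <stdio.h>

struct toy_level { int next_child; int nr_children; };

static void toy_remove_tree(struct toy_level *path, int depth)
{
	int i = 0; /* current level; 'depth' is the leaf level */

	while (i >= 0) {
		if (i == depth) {
			printf("free leaf under level %d\n", i - 1);
			i--;                  /* pop back to the index */
		} else if (path[i].next_child < path[i].nr_children) {
			path[i].next_child++; /* descend into next child */
			i++;
		} else {
			printf("free index block at level %d\n", i);
			i--;                  /* level exhausted: pop */
		}
	}
}

int main(void)
{
	/* depth-1 tree: a root index node with two leaves below it */
	struct toy_level path[2] = { { 0, 2 }, { 0, 0 } };

	toy_remove_tree(path, 1);
	return 0;
}
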
2774 | ||
26a4c0c6 TT |
2775 | int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, |
2776 | ext4_lblk_t end) | |
a86c6181 | 2777 | { |
f4226d9e | 2778 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
a86c6181 | 2779 | int depth = ext_depth(inode); |
968dee77 | 2780 | struct ext4_ext_path *path = NULL; |
9fe67149 | 2781 | struct partial_cluster partial; |
a86c6181 | 2782 | handle_t *handle; |
6f2080e6 | 2783 | int i = 0, err = 0; |
a86c6181 | 2784 | |
9fe67149 EW |
2785 | partial.pclu = 0; |
2786 | partial.lblk = 0; | |
2787 | partial.state = initial; | |
2788 | ||
70aa1554 | 2789 | ext_debug(inode, "truncate since %u to %u\n", start, end); |
a86c6181 AT |
2790 | |
2791 | /* probably the first extent we're going to free will be the last in the block */
83448bdf JK |
2792 | handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, |
2793 | depth + 1, | |
2794 | ext4_free_metadata_revoke_credits(inode->i_sb, depth)); | |
a86c6181 AT |
2795 | if (IS_ERR(handle)) |
2796 | return PTR_ERR(handle); | |
2797 | ||
0617b83f | 2798 | again: |
61801325 | 2799 | trace_ext4_ext_remove_space(inode, start, end, depth); |
d8990240 | 2800 | |
5f95d21f LC |
2801 | /* |
2802 | * Check if we are removing extents inside the extent tree. If that | |
2803 | * is the case, we are going to punch a hole inside the extent tree | |
2804 | * so we have to check whether we need to split the extent covering | |
2805 | * the last block to remove so we can easily remove the part of it | |
2806 | * in ext4_ext_rm_leaf(). | |
2807 | */ | |
2808 | if (end < EXT_MAX_BLOCKS - 1) { | |
2809 | struct ext4_extent *ex; | |
f4226d9e EW |
2810 | ext4_lblk_t ee_block, ex_end, lblk; |
2811 | ext4_fsblk_t pblk; | |
5f95d21f | 2812 | |
f4226d9e | 2813 | /* find extent for or closest extent to this block */ |
73c384c0 TT |
2814 | path = ext4_find_extent(inode, end, NULL, |
2815 | EXT4_EX_NOCACHE | EXT4_EX_NOFAIL); | |
5f95d21f LC |
2816 | if (IS_ERR(path)) { |
2817 | ext4_journal_stop(handle); | |
2818 | return PTR_ERR(path); | |
2819 | } | |
2820 | depth = ext_depth(inode); | |
6f2080e6 | 2821 | /* A leaf may not exist only if the inode has no blocks at all */
5f95d21f | 2822 | ex = path[depth].p_ext; |
968dee77 | 2823 | if (!ex) { |
6f2080e6 DM |
2824 | if (depth) { |
2825 | EXT4_ERROR_INODE(inode, | |
2826 | "path[%d].p_hdr == NULL", | |
2827 | depth); | |
6a797d27 | 2828 | err = -EFSCORRUPTED; |
6f2080e6 DM |
2829 | } |
2830 | goto out; | |
968dee77 | 2831 | } |
5f95d21f LC |
2832 | |
2833 | ee_block = le32_to_cpu(ex->ee_block); | |
f4226d9e | 2834 | ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; |
5f95d21f LC |
2835 | |
2836 | /* | |
2837 | * See if the last block is inside the extent; if so, split
2838 | * the extent at 'end' block so we can easily remove the | |
2839 | * tail of the first part of the split extent in | |
2840 | * ext4_ext_rm_leaf(). | |
2841 | */ | |
f4226d9e EW |
2842 | if (end >= ee_block && end < ex_end) { |
2843 | ||
2844 | /* | |
2845 | * If we're going to split the extent, note that | |
2846 | * the cluster containing the block after 'end' is | |
2847 | * in use to avoid freeing it when removing blocks. | |
2848 | */ | |
2849 | if (sbi->s_cluster_ratio > 1) { | |
cfb3c85a | 2850 | pblk = ext4_ext_pblock(ex) + end - ee_block + 1; |
9fe67149 EW |
2851 | partial.pclu = EXT4_B2C(sbi, pblk); |
2852 | partial.state = nofree; | |
f4226d9e EW |
2853 | } |
2854 | ||
5f95d21f LC |
2855 | /* |
2856 | * Split the extent in two so that 'end' is the last | |
27dd4385 LC |
2857 | * block in the first new extent. Also we should not |
2858 | * fail removing space due to ENOSPC so try to use | |
2859 | * reserved block if that happens. | |
5f95d21f | 2860 | */ |
dfe50809 | 2861 | err = ext4_force_split_extent_at(handle, inode, &path, |
fcf6b1b7 | 2862 | end + 1, 1); |
5f95d21f LC |
2863 | if (err < 0) |
2864 | goto out; | |
f4226d9e | 2865 | |
7bd75230 EW |
2866 | } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && |
2867 | partial.state == initial) { | |
f4226d9e | 2868 | /* |
7bd75230 EW |
2869 | * If we're punching, there's an extent to the right. |
2870 | * If the partial cluster hasn't been set, set it to | |
2871 | * that extent's first cluster and its state to nofree | |
2872 | * so it won't be freed should it contain blocks to be | |
2873 | * removed. If it's already set (tofree/nofree), we're | |
2874 | * retrying and keep the original partial cluster info | |
2875 | * so a cluster marked tofree as a result of earlier | |
2876 | * extent removal is not lost. | |
f4226d9e EW |
2877 | */ |
2878 | lblk = ex_end + 1; | |
2879 | err = ext4_ext_search_right(inode, path, &lblk, &pblk, | |
d7dce9e0 | 2880 | NULL); |
2881 | if (err < 0) | |
f4226d9e | 2882 | goto out; |
9fe67149 EW |
2883 | if (pblk) { |
2884 | partial.pclu = EXT4_B2C(sbi, pblk); | |
2885 | partial.state = nofree; | |
2886 | } | |
5f95d21f | 2887 | } |
5f95d21f | 2888 | } |
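/*
 * Worked example of the 'nofree' bookkeeping above (illustrative,
 * assuming a bigalloc cluster ratio of 16): punching out blocks
 * 100..199 while block 200 is still mapped records the cluster
 * holding block 200's physical block in partial.pclu, so a cluster
 * shared with surviving data is never freed. The mapping itself is
 * just a shift; a minimal sketch of what EXT4_B2C() computes,
 * with example_block_to_cluster() being a hypothetical helper:
 */
static inline unsigned long long example_block_to_cluster(
		unsigned long long pblk, unsigned int cluster_bits)
{
	/* e.g. pblk 3200 with 16 blocks per cluster (4 bits) -> cluster 200 */
	return pblk >> cluster_bits;
}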
a86c6181 | 2889 | /* |
d0d856e8 RD |
2890 | * We start scanning from right side, freeing all the blocks |
2891 | * after i_size and walking into the tree depth-wise. | |
a86c6181 | 2892 | */ |
0617b83f | 2893 | depth = ext_depth(inode); |
968dee77 AS |
2894 | if (path) { |
2895 | int k = i = depth; | |
2896 | while (--k > 0) | |
2897 | path[k].p_block = | |
2898 | le16_to_cpu(path[k].p_hdr->eh_entries)+1; | |
2899 | } else { | |
6396bb22 | 2900 | path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), |
73c384c0 | 2901 | GFP_NOFS | __GFP_NOFAIL); |
968dee77 AS |
2902 | if (path == NULL) { |
2903 | ext4_journal_stop(handle); | |
2904 | return -ENOMEM; | |
2905 | } | |
10809df8 | 2906 | path[0].p_maxdepth = path[0].p_depth = depth; |
968dee77 | 2907 | path[0].p_hdr = ext_inode_hdr(inode); |
89a4e48f | 2908 | i = 0; |
5f95d21f | 2909 | |
c349179b | 2910 | if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { |
6a797d27 | 2911 | err = -EFSCORRUPTED; |
968dee77 AS |
2912 | goto out; |
2913 | } | |
a86c6181 | 2914 | } |
968dee77 | 2915 | err = 0; |
a86c6181 AT |
2916 | |
2917 | while (i >= 0 && err == 0) { | |
2918 | if (i == depth) { | |
2919 | /* this is leaf block */ | |
d583fb87 | 2920 | err = ext4_ext_rm_leaf(handle, inode, path, |
9fe67149 | 2921 | &partial, start, end); |
d0d856e8 | 2922 | /* root level has p_bh == NULL, brelse() eats this */ |
a86c6181 AT |
2923 | brelse(path[i].p_bh); |
2924 | path[i].p_bh = NULL; | |
2925 | i--; | |
2926 | continue; | |
2927 | } | |
2928 | ||
2929 | /* this is index block */ | |
2930 | if (!path[i].p_hdr) { | |
70aa1554 | 2931 | ext_debug(inode, "initialize header\n"); |
a86c6181 | 2932 | path[i].p_hdr = ext_block_hdr(path[i].p_bh); |
a86c6181 AT |
2933 | } |
2934 | ||
a86c6181 | 2935 | if (!path[i].p_idx) { |
d0d856e8 | 2936 | /* this level hasn't been touched yet */ |
a86c6181 AT |
2937 | path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); |
2938 | path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; | |
70aa1554 | 2939 | ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n", |
a86c6181 AT |
2940 | path[i].p_hdr, |
2941 | le16_to_cpu(path[i].p_hdr->eh_entries)); | |
2942 | } else { | |
d0d856e8 | 2943 | /* we were already here, so move on to the next index */ |
a86c6181 AT |
2944 | path[i].p_idx--; |
2945 | } | |
2946 | ||
70aa1554 | 2947 | ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n", |
a86c6181 AT |
2948 | i, EXT_FIRST_INDEX(path[i].p_hdr), |
2949 | path[i].p_idx); | |
2950 | if (ext4_ext_more_to_rm(path + i)) { | |
c29c0ae7 | 2951 | struct buffer_head *bh; |
a86c6181 | 2952 | /* go to the next level */ |
70aa1554 | 2953 | ext_debug(inode, "move to level %d (block %llu)\n", |
bf89d16f | 2954 | i + 1, ext4_idx_pblock(path[i].p_idx)); |
a86c6181 | 2955 | memset(path + i + 1, 0, sizeof(*path)); |
7d7ea89e | 2956 | bh = read_extent_tree_block(inode, |
107a7bd3 TT |
2957 | ext4_idx_pblock(path[i].p_idx), depth - i - 1, |
2958 | EXT4_EX_NOCACHE); | |
7d7ea89e | 2959 | if (IS_ERR(bh)) { |
a86c6181 | 2960 | /* should we reset i_size? */ |
7d7ea89e | 2961 | err = PTR_ERR(bh); |
a86c6181 AT |
2962 | break; |
2963 | } | |
76828c88 TT |
2964 | /* Yield here to deal with large extent trees. |
2965 | * Should be a no-op if we did IO above. */ | |
2966 | cond_resched(); | |
c29c0ae7 | 2967 | if (WARN_ON(i + 1 > depth)) { |
6a797d27 | 2968 | err = -EFSCORRUPTED; |
c29c0ae7 AT |
2969 | break; |
2970 | } | |
2971 | path[i + 1].p_bh = bh; | |
a86c6181 | 2972 | |
d0d856e8 RD |
2973 | /* save actual number of indexes since this |
2974 | * number is changed at the next iteration */ | |
a86c6181 AT |
2975 | path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); |
2976 | i++; | |
2977 | } else { | |
d0d856e8 | 2978 | /* we finished processing this index, go up */ |
a86c6181 | 2979 | if (path[i].p_hdr->eh_entries == 0 && i > 0) { |
d0d856e8 | 2980 | /* index is empty, remove it; |
a86c6181 AT |
2981 | * handle must already be prepared by the |
2982 | * leaf removal in ext4_ext_rm_leaf() */ |
c36575e6 | 2983 | err = ext4_ext_rm_idx(handle, inode, path, i); |
a86c6181 | 2984 | } |
d0d856e8 | 2985 | /* root level has p_bh == NULL, brelse() eats this */ |
a86c6181 AT |
2986 | brelse(path[i].p_bh); |
2987 | path[i].p_bh = NULL; | |
2988 | i--; | |
70aa1554 | 2989 | ext_debug(inode, "return to level %d\n", i); |
a86c6181 AT |
2990 | } |
2991 | } | |
2992 | ||
9fe67149 EW |
2993 | trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, |
2994 | path->p_hdr->eh_entries); | |
d8990240 | 2995 | |
0756b908 | 2996 | /* |
9fe67149 EW |
2997 | * if there's a partial cluster and we have removed the first extent | |
2998 | * in the file, then we also free that partial cluster | |
0756b908 | 2999 | */ |
9fe67149 EW |
3000 | if (partial.state == tofree && err == 0) { |
3001 | int flags = get_default_free_blocks_flags(inode); | |
3002 | ||
3003 | if (ext4_is_pending(inode, partial.lblk)) | |
3004 | flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; | |
7b415bf6 | 3005 | ext4_free_blocks(handle, inode, NULL, |
9fe67149 EW |
3006 | EXT4_C2B(sbi, partial.pclu), |
3007 | sbi->s_cluster_ratio, flags); | |
3008 | if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) | |
3009 | ext4_rereserve_cluster(inode, partial.lblk); | |
3010 | partial.state = initial; | |
7b415bf6 AK |
3011 | } |
3012 | ||
a86c6181 AT |
3013 | /* TODO: flexible tree reduction should be here */ |
3014 | if (path->p_hdr->eh_entries == 0) { | |
3015 | /* | |
d0d856e8 RD |
3016 | * truncate to zero freed all the tree, |
3017 | * so we need to correct eh_depth | |
a86c6181 AT |
3018 | */ |
3019 | err = ext4_ext_get_access(handle, inode, path); | |
3020 | if (err == 0) { | |
3021 | ext_inode_hdr(inode)->eh_depth = 0; | |
3022 | ext_inode_hdr(inode)->eh_max = | |
55ad63bf | 3023 | cpu_to_le16(ext4_ext_space_root(inode, 0)); |
a86c6181 AT |
3024 | err = ext4_ext_dirty(handle, inode, path); |
3025 | } | |
3026 | } | |
3027 | out: | |
b7ea89ad TT |
3028 | ext4_ext_drop_refs(path); |
3029 | kfree(path); | |
3030 | path = NULL; | |
dfe50809 TT |
3031 | if (err == -EAGAIN) |
3032 | goto again; | |
a86c6181 AT |
3033 | ext4_journal_stop(handle); |
3034 | ||
3035 | return err; | |
3036 | } | |
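/*
 * Illustrative walk of the removal loop above (not from the source):
 * for a depth-2 tree, i starts at 0 (or at a cached path on retry),
 * descends 0 -> 1 -> 2 while ext4_ext_more_to_rm() still sees
 * unvisited indexes, frees blocks at the leaf level (i == depth),
 * then ascends, removing any index block that became empty on the
 * way back up.
 */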
3037 | ||
3038 | /* | |
3039 | * called at mount time | |
3040 | */ | |
3041 | void ext4_ext_init(struct super_block *sb) | |
3042 | { | |
3043 | /* | |
3044 | * possible initialization would be here | |
3045 | */ | |
3046 | ||
e2b911c5 | 3047 | if (ext4_has_feature_extents(sb)) { |
90576c0b | 3048 | #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) |
92b97816 | 3049 | printk(KERN_INFO "EXT4-fs: file extents enabled" |
bbf2f9fb | 3050 | #ifdef AGGRESSIVE_TEST |
92b97816 | 3051 | ", aggressive tests" |
a86c6181 AT |
3052 | #endif |
3053 | #ifdef CHECK_BINSEARCH | |
92b97816 | 3054 | ", check binsearch" |
a86c6181 AT |
3055 | #endif |
3056 | #ifdef EXTENTS_STATS | |
92b97816 | 3057 | ", stats" |
a86c6181 | 3058 | #endif |
92b97816 | 3059 | "\n"); |
90576c0b | 3060 | #endif |
a86c6181 AT |
3061 | #ifdef EXTENTS_STATS |
3062 | spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); | |
3063 | EXT4_SB(sb)->s_ext_min = 1 << 30; | |
3064 | EXT4_SB(sb)->s_ext_max = 0; | |
3065 | #endif | |
3066 | } | |
3067 | } | |
3068 | ||
3069 | /* | |
3070 | * called at umount time | |
3071 | */ | |
3072 | void ext4_ext_release(struct super_block *sb) | |
3073 | { | |
e2b911c5 | 3074 | if (!ext4_has_feature_extents(sb)) |
a86c6181 AT |
3075 | return; |
3076 | ||
3077 | #ifdef EXTENTS_STATS | |
3078 | if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { | |
3079 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
3080 | printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", | |
3081 | sbi->s_ext_blocks, sbi->s_ext_extents, | |
3082 | sbi->s_ext_blocks / sbi->s_ext_extents); | |
3083 | printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", | |
3084 | sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); | |
3085 | } | |
3086 | #endif | |
3087 | } | |
3088 | ||
d7b2a00c ZL |
3089 | static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) |
3090 | { | |
3091 | ext4_lblk_t ee_block; | |
3092 | ext4_fsblk_t ee_pblock; | |
3093 | unsigned int ee_len; | |
3094 | ||
3095 | ee_block = le32_to_cpu(ex->ee_block); | |
3096 | ee_len = ext4_ext_get_actual_len(ex); | |
3097 | ee_pblock = ext4_ext_pblock(ex); | |
3098 | ||
3099 | if (ee_len == 0) | |
3100 | return 0; | |
3101 | ||
3102 | return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, | |
3103 | EXTENT_STATUS_WRITTEN); | |
3104 | } | |
3105 | ||
093a088b AK |
3106 | /* FIXME!! we need to try to merge to left or right after zero-out */ |
3107 | static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |
3108 | { | |
2407518d LC |
3109 | ext4_fsblk_t ee_pblock; |
3110 | unsigned int ee_len; | |
093a088b | 3111 | |
093a088b | 3112 | ee_len = ext4_ext_get_actual_len(ex); |
bf89d16f | 3113 | ee_pblock = ext4_ext_pblock(ex); |
53085fac JK |
3114 | return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock, |
3115 | ee_len); | |
093a088b AK |
3116 | } |
3117 | ||
47ea3bb5 YY |
3118 | /* |
3119 | * ext4_split_extent_at() splits an extent at a given block. | |
3120 | * | |
3121 | * @handle: the journal handle | |
3122 | * @inode: the file inode | |
3123 | * @path: the path to the extent | |
3124 | * @split: the logical block where the extent is split. | |
3125 | * @split_flag: indicates whether the extent could be zeroed out if the split | |
556615dc | 3126 | * fails, and the states (initialized or unwritten) of the new extents. |
47ea3bb5 YY |
3127 | * @flags: flags used to insert the new extent into the extent tree. | |
3128 | * | |
3129 | * | |
3130 | * Splits extent [a, b] into two extents [a, @split) and [@split, b], states | |
e4d7f2d3 | 3131 | * of which are determined by split_flag. |
47ea3bb5 YY |
3132 | * |
3133 | * There are two cases: | |
3134 | * a> the extent is split into two extents. | |
3135 | * b> no split is needed, and the extent is just marked. | |
3136 | * | |
3137 | * return 0 on success. | |
3138 | */ | |
3139 | static int ext4_split_extent_at(handle_t *handle, | |
3140 | struct inode *inode, | |
dfe50809 | 3141 | struct ext4_ext_path **ppath, |
47ea3bb5 YY |
3142 | ext4_lblk_t split, |
3143 | int split_flag, | |
3144 | int flags) | |
3145 | { | |
dfe50809 | 3146 | struct ext4_ext_path *path = *ppath; |
47ea3bb5 YY |
3147 | ext4_fsblk_t newblock; |
3148 | ext4_lblk_t ee_block; | |
adb23551 | 3149 | struct ext4_extent *ex, newex, orig_ex, zero_ex; |
47ea3bb5 YY |
3150 | struct ext4_extent *ex2 = NULL; |
3151 | unsigned int ee_len, depth; | |
3152 | int err = 0; | |
3153 | ||
dee1f973 DM |
3154 | BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == |
3155 | (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); | |
3156 | ||
70aa1554 | 3157 | ext_debug(inode, "logical block %llu\n", (unsigned long long)split); |
47ea3bb5 YY |
3158 | |
3159 | ext4_ext_show_leaf(inode, path); | |
3160 | ||
3161 | depth = ext_depth(inode); | |
3162 | ex = path[depth].p_ext; | |
3163 | ee_block = le32_to_cpu(ex->ee_block); | |
3164 | ee_len = ext4_ext_get_actual_len(ex); | |
3165 | newblock = split - ee_block + ext4_ext_pblock(ex); | |
3166 | ||
3167 | BUG_ON(split < ee_block || split >= (ee_block + ee_len)); | |
556615dc | 3168 | BUG_ON(!ext4_ext_is_unwritten(ex) && |
357b66fd | 3169 | split_flag & (EXT4_EXT_MAY_ZEROOUT | |
556615dc LC |
3170 | EXT4_EXT_MARK_UNWRIT1 | |
3171 | EXT4_EXT_MARK_UNWRIT2)); | |
47ea3bb5 YY |
3172 | |
3173 | err = ext4_ext_get_access(handle, inode, path + depth); | |
3174 | if (err) | |
3175 | goto out; | |
3176 | ||
3177 | if (split == ee_block) { | |
3178 | /* | |
3179 | * case b: block @split is the block that the extent begins with | |
3180 | * then we just change the state of the extent, and splitting | |
3181 | * is not needed. | |
3182 | */ | |
556615dc LC |
3183 | if (split_flag & EXT4_EXT_MARK_UNWRIT2) |
3184 | ext4_ext_mark_unwritten(ex); | |
47ea3bb5 YY |
3185 | else |
3186 | ext4_ext_mark_initialized(ex); | |
3187 | ||
3188 | if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) | |
ecb94f5f | 3189 | ext4_ext_try_to_merge(handle, inode, path, ex); |
47ea3bb5 | 3190 | |
ecb94f5f | 3191 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
47ea3bb5 YY |
3192 | goto out; |
3193 | } | |
3194 | ||
3195 | /* case a */ | |
3196 | memcpy(&orig_ex, ex, sizeof(orig_ex)); | |
3197 | ex->ee_len = cpu_to_le16(split - ee_block); | |
556615dc LC |
3198 | if (split_flag & EXT4_EXT_MARK_UNWRIT1) |
3199 | ext4_ext_mark_unwritten(ex); | |
47ea3bb5 YY |
3200 | |
3201 | /* | |
3202 | * the path may lead to a new leaf, not to the original leaf any more, | |
3203 | * after ext4_ext_insert_extent() returns. | |
3204 | */ | |
3205 | err = ext4_ext_dirty(handle, inode, path + depth); | |
3206 | if (err) | |
3207 | goto fix_extent_len; | |
3208 | ||
3209 | ex2 = &newex; | |
3210 | ex2->ee_block = cpu_to_le32(split); | |
3211 | ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); | |
3212 | ext4_ext_store_pblock(ex2, newblock); | |
556615dc LC |
3213 | if (split_flag & EXT4_EXT_MARK_UNWRIT2) |
3214 | ext4_ext_mark_unwritten(ex2); | |
47ea3bb5 | 3215 | |
dfe50809 | 3216 | err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); |
082cd4ec YB |
3217 | if (err != -ENOSPC && err != -EDQUOT) |
3218 | goto out; | |
3219 | ||
3220 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) { | |
dee1f973 | 3221 | if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { |
adb23551 | 3222 | if (split_flag & EXT4_EXT_DATA_VALID1) { |
dee1f973 | 3223 | err = ext4_ext_zeroout(inode, ex2); |
adb23551 | 3224 | zero_ex.ee_block = ex2->ee_block; |
8cde7ad1 ZL |
3225 | zero_ex.ee_len = cpu_to_le16( |
3226 | ext4_ext_get_actual_len(ex2)); | |
adb23551 ZL |
3227 | ext4_ext_store_pblock(&zero_ex, |
3228 | ext4_ext_pblock(ex2)); | |
3229 | } else { | |
dee1f973 | 3230 | err = ext4_ext_zeroout(inode, ex); |
adb23551 | 3231 | zero_ex.ee_block = ex->ee_block; |
8cde7ad1 ZL |
3232 | zero_ex.ee_len = cpu_to_le16( |
3233 | ext4_ext_get_actual_len(ex)); | |
adb23551 ZL |
3234 | ext4_ext_store_pblock(&zero_ex, |
3235 | ext4_ext_pblock(ex)); | |
3236 | } | |
3237 | } else { | |
dee1f973 | 3238 | err = ext4_ext_zeroout(inode, &orig_ex); |
adb23551 | 3239 | zero_ex.ee_block = orig_ex.ee_block; |
8cde7ad1 ZL |
3240 | zero_ex.ee_len = cpu_to_le16( |
3241 | ext4_ext_get_actual_len(&orig_ex)); | |
adb23551 ZL |
3242 | ext4_ext_store_pblock(&zero_ex, |
3243 | ext4_ext_pblock(&orig_ex)); | |
3244 | } | |
dee1f973 | 3245 | |
082cd4ec YB |
3246 | if (!err) { |
3247 | /* update the extent length and mark as initialized */ | |
3248 | ex->ee_len = cpu_to_le16(ee_len); | |
3249 | ext4_ext_try_to_merge(handle, inode, path, ex); | |
3250 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); | |
3251 | if (!err) | |
3252 | /* update extent status tree */ | |
3253 | err = ext4_zeroout_es(inode, &zero_ex); | |
3254 | /* If we failed at this point, we don't know in which | |
3255 | * state the extent tree exactly is so don't try to fix | |
3256 | * length of the original extent as it may do even more | |
3257 | * damage. | |
3258 | */ | |
3259 | goto out; | |
3260 | } | |
3261 | } | |
47ea3bb5 YY |
3262 | |
3263 | fix_extent_len: | |
3264 | ex->ee_len = orig_ex.ee_len; | |
b60ca334 HS |
3265 | /* |
3266 | * Ignore ext4_ext_dirty return value since we are already in error path | |
3267 | * and err is a non-zero error code. | |
3268 | */ | |
29faed16 | 3269 | ext4_ext_dirty(handle, inode, path + path->p_depth); |
47ea3bb5 | 3270 | return err; |
082cd4ec YB |
3271 | out: |
3272 | ext4_ext_show_leaf(inode, path); | |
3273 | return err; | |
47ea3bb5 YY |
3274 | } |
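/*
 * Worked example for the split arithmetic above (illustrative): an
 * extent (ee_block 100, ee_len 50, pblk 5000) split at lblk 120 ends
 * up as (100, 20, 5000) and (120, 30, 5020). A minimal sketch of the
 * 'newblock' computation; example_second_half_pblk() is hypothetical:
 */
static inline unsigned long long example_second_half_pblk(
		unsigned int split, unsigned int ee_block,
		unsigned long long ee_pblk)
{
	/* mirrors: newblock = split - ee_block + ext4_ext_pblock(ex) */
	return ee_pblk + (split - ee_block);
}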
3275 | ||
3276 | /* | |
3277 | * ext4_split_extent() splits an extent and marks the extent covered | |
3278 | * by @map as split_flag indicates | |
3279 | * | |
70261f56 | 3280 | * It may result in splitting the extent into multiple extents (up to three) |
47ea3bb5 YY |
3281 | * There are three possibilities: |
3282 | * a> There is no split required | |
3283 | * b> Splits in two extents: Split is happening at either end of the extent | |
3284 | * c> Splits in three extents: Someone is splitting in the middle of the extent | |
3285 | * | |
3286 | */ | |
3287 | static int ext4_split_extent(handle_t *handle, | |
3288 | struct inode *inode, | |
dfe50809 | 3289 | struct ext4_ext_path **ppath, |
47ea3bb5 YY |
3290 | struct ext4_map_blocks *map, |
3291 | int split_flag, | |
3292 | int flags) | |
3293 | { | |
dfe50809 | 3294 | struct ext4_ext_path *path = *ppath; |
47ea3bb5 YY |
3295 | ext4_lblk_t ee_block; |
3296 | struct ext4_extent *ex; | |
3297 | unsigned int ee_len, depth; | |
3298 | int err = 0; | |
556615dc | 3299 | int unwritten; |
47ea3bb5 | 3300 | int split_flag1, flags1; |
3a225670 | 3301 | int allocated = map->m_len; |
47ea3bb5 YY |
3302 | |
3303 | depth = ext_depth(inode); | |
3304 | ex = path[depth].p_ext; | |
3305 | ee_block = le32_to_cpu(ex->ee_block); | |
3306 | ee_len = ext4_ext_get_actual_len(ex); | |
556615dc | 3307 | unwritten = ext4_ext_is_unwritten(ex); |
47ea3bb5 YY |
3308 | |
3309 | if (map->m_lblk + map->m_len < ee_block + ee_len) { | |
dee1f973 | 3310 | split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; |
47ea3bb5 | 3311 | flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; |
556615dc LC |
3312 | if (unwritten) |
3313 | split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | | |
3314 | EXT4_EXT_MARK_UNWRIT2; | |
dee1f973 DM |
3315 | if (split_flag & EXT4_EXT_DATA_VALID2) |
3316 | split_flag1 |= EXT4_EXT_DATA_VALID1; | |
dfe50809 | 3317 | err = ext4_split_extent_at(handle, inode, ppath, |
47ea3bb5 | 3318 | map->m_lblk + map->m_len, split_flag1, flags1); |
93917411 YY |
3319 | if (err) |
3320 | goto out; | |
3a225670 ZL |
3321 | } else { |
3322 | allocated = ee_len - (map->m_lblk - ee_block); | |
47ea3bb5 | 3323 | } |
357b66fd DM |
3324 | /* |
3325 | * Update path is required because previous ext4_split_extent_at() may | |
3326 | * result in split of original leaf or extent zeroout. | |
3327 | */ | |
73c384c0 | 3328 | path = ext4_find_extent(inode, map->m_lblk, ppath, flags); |
47ea3bb5 YY |
3329 | if (IS_ERR(path)) |
3330 | return PTR_ERR(path); | |
357b66fd DM |
3331 | depth = ext_depth(inode); |
3332 | ex = path[depth].p_ext; | |
a18ed359 DM |
3333 | if (!ex) { |
3334 | EXT4_ERROR_INODE(inode, "unexpected hole at %lu", | |
3335 | (unsigned long) map->m_lblk); | |
6a797d27 | 3336 | return -EFSCORRUPTED; |
a18ed359 | 3337 | } |
556615dc | 3338 | unwritten = ext4_ext_is_unwritten(ex); |
357b66fd | 3339 | split_flag1 = 0; |
47ea3bb5 YY |
3340 | |
3341 | if (map->m_lblk >= ee_block) { | |
357b66fd | 3342 | split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; |
556615dc LC |
3343 | if (unwritten) { |
3344 | split_flag1 |= EXT4_EXT_MARK_UNWRIT1; | |
357b66fd | 3345 | split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | |
556615dc | 3346 | EXT4_EXT_MARK_UNWRIT2); |
357b66fd | 3347 | } |
dfe50809 | 3348 | err = ext4_split_extent_at(handle, inode, ppath, |
47ea3bb5 YY |
3349 | map->m_lblk, split_flag1, flags); |
3350 | if (err) | |
3351 | goto out; | |
3352 | } | |
3353 | ||
3354 | ext4_ext_show_leaf(inode, path); | |
3355 | out: | |
3a225670 | 3356 | return err ? err : allocated; |
47ea3bb5 YY |
3357 | } |
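/*
 * Illustrative note on the ordering above (not from the source): for
 * an extent [ee_block, ee_block + ee_len) and a map [m_lblk, m_lblk +
 * m_len), the right cut at m_lblk + m_len is made first, the path is
 * then looked up again, and the left cut at m_lblk is made; a cut
 * that falls on the extent start degenerates into just (re)marking
 * the extent, which yields the two-extent and no-split cases.
 */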
3358 | ||
56055d3a | 3359 | /* |
e35fd660 | 3360 | * This function is called by ext4_ext_map_blocks() if someone tries to write |
556615dc | 3361 | * to an unwritten extent. It may result in splitting the unwritten |
25985edc | 3362 | * extent into multiple extents (up to three - one initialized and two |
556615dc | 3363 | * unwritten). |
56055d3a AA |
3364 | * There are three possibilities: |
3365 | * a> There is no split required: Entire extent should be initialized | |
3366 | * b> Splits in two extents: Write is happening at either end of the extent | |
3367 | * c> Splits in three extents: Someone is writing in the middle of the extent | |
6f91bc5f EG |
3368 | * |
3369 | * Pre-conditions: | |
556615dc | 3370 | * - The extent pointed to by 'path' is unwritten. |
6f91bc5f EG |
3371 | * - The extent pointed to by 'path' contains a superset |
3372 | * of the logical span [map->m_lblk, map->m_lblk + map->m_len). | |
3373 | * | |
3374 | * Post-conditions on success: | |
3375 | * - the returned value is the number of blocks beyond map->m_lblk | |
3376 | * that are allocated and initialized. | |
3377 | * It is guaranteed to be >= map->m_len. | |
56055d3a | 3378 | */ |
725d26d3 | 3379 | static int ext4_ext_convert_to_initialized(handle_t *handle, |
e35fd660 TT |
3380 | struct inode *inode, |
3381 | struct ext4_map_blocks *map, | |
dfe50809 | 3382 | struct ext4_ext_path **ppath, |
27dd4385 | 3383 | int flags) |
56055d3a | 3384 | { |
dfe50809 | 3385 | struct ext4_ext_path *path = *ppath; |
67a5da56 | 3386 | struct ext4_sb_info *sbi; |
6f91bc5f | 3387 | struct ext4_extent_header *eh; |
667eff35 | 3388 | struct ext4_map_blocks split_map; |
4f8caa60 | 3389 | struct ext4_extent zero_ex1, zero_ex2; |
bc2d9db4 | 3390 | struct ext4_extent *ex, *abut_ex; |
21ca087a | 3391 | ext4_lblk_t ee_block, eof_block; |
bc2d9db4 LC |
3392 | unsigned int ee_len, depth, map_len = map->m_len; |
3393 | int allocated = 0, max_zeroout = 0; | |
56055d3a | 3394 | int err = 0; |
4f8caa60 | 3395 | int split_flag = EXT4_EXT_DATA_VALID2; |
21ca087a | 3396 | |
70aa1554 RH |
3397 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
3398 | (unsigned long long)map->m_lblk, map_len); | |
21ca087a | 3399 | |
67a5da56 | 3400 | sbi = EXT4_SB(inode->i_sb); |
801674f3 JK |
3401 | eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) |
3402 | >> inode->i_sb->s_blocksize_bits; | |
bc2d9db4 LC |
3403 | if (eof_block < map->m_lblk + map_len) |
3404 | eof_block = map->m_lblk + map_len; | |
56055d3a AA |
3405 | |
3406 | depth = ext_depth(inode); | |
6f91bc5f | 3407 | eh = path[depth].p_hdr; |
56055d3a AA |
3408 | ex = path[depth].p_ext; |
3409 | ee_block = le32_to_cpu(ex->ee_block); | |
3410 | ee_len = ext4_ext_get_actual_len(ex); | |
4f8caa60 JK |
3411 | zero_ex1.ee_len = 0; |
3412 | zero_ex2.ee_len = 0; | |
56055d3a | 3413 | |
6f91bc5f EG |
3414 | trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); |
3415 | ||
3416 | /* Pre-conditions */ | |
556615dc | 3417 | BUG_ON(!ext4_ext_is_unwritten(ex)); |
6f91bc5f | 3418 | BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); |
6f91bc5f EG |
3419 | |
3420 | /* | |
3421 | * Attempt to transfer newly initialized blocks from the currently | |
556615dc | 3422 | * unwritten extent to its neighbor. This is much cheaper |
6f91bc5f | 3423 | * than an insertion followed by a merge as those involve costly |
bc2d9db4 LC |
3424 | * memmove() calls. Transferring to the left is the common case in |
3425 | * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) | |
3426 | * followed by append writes. | |
6f91bc5f EG |
3427 | * |
3428 | * Limitations of the current logic: | |
bc2d9db4 | 3429 | * - L1: we do not deal with writes covering the whole extent. |
6f91bc5f EG |
3430 | * This would require removing the extent if the transfer |
3431 | * is possible. | |
bc2d9db4 | 3432 | * - L2: we only attempt to merge with an extent stored in the |
6f91bc5f EG |
3433 | * same extent tree node. |
3434 | */ | |
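/*
 * Worked example for the left-merge fast path below (illustrative):
 * with abut_ex (lblk 90, len 10, initialized, pblk 490) and ex
 * (lblk 100, len 10, unwritten, pblk 500), a 4-block write at lblk
 * 100 satisfies C1..C4, so ex becomes (104, 6, 504, unwritten),
 * abut_ex becomes (90, 14, 490), and 'allocated' is 4, with no
 * extent inserted and no memmove() of the leaf.
 */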
bc2d9db4 LC |
3435 | if ((map->m_lblk == ee_block) && |
3436 | /* See if we can merge left */ | |
3437 | (map_len < ee_len) && /*L1*/ | |
3438 | (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ | |
6f91bc5f EG |
3439 | ext4_lblk_t prev_lblk; |
3440 | ext4_fsblk_t prev_pblk, ee_pblk; | |
bc2d9db4 | 3441 | unsigned int prev_len; |
6f91bc5f | 3442 | |
bc2d9db4 LC |
3443 | abut_ex = ex - 1; |
3444 | prev_lblk = le32_to_cpu(abut_ex->ee_block); | |
3445 | prev_len = ext4_ext_get_actual_len(abut_ex); | |
3446 | prev_pblk = ext4_ext_pblock(abut_ex); | |
6f91bc5f | 3447 | ee_pblk = ext4_ext_pblock(ex); |
6f91bc5f EG |
3448 | |
3449 | /* | |
bc2d9db4 | 3450 | * A transfer of blocks from 'ex' to 'abut_ex' is allowed |
6f91bc5f | 3451 | * upon those conditions: |
bc2d9db4 LC |
3452 | * - C1: abut_ex is initialized, |
3453 | * - C2: abut_ex is logically abutting ex, | |
3454 | * - C3: abut_ex is physically abutting ex, | |
3455 | * - C4: abut_ex can receive the additional blocks without | |
6f91bc5f EG |
3456 | * overflowing the (initialized) length limit. |
3457 | */ | |
556615dc | 3458 | if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ |
6f91bc5f EG |
3459 | ((prev_lblk + prev_len) == ee_block) && /*C2*/ |
3460 | ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ | |
bc2d9db4 | 3461 | (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ |
6f91bc5f EG |
3462 | err = ext4_ext_get_access(handle, inode, path + depth); |
3463 | if (err) | |
3464 | goto out; | |
3465 | ||
3466 | trace_ext4_ext_convert_to_initialized_fastpath(inode, | |
bc2d9db4 | 3467 | map, ex, abut_ex); |
6f91bc5f | 3468 | |
bc2d9db4 LC |
3469 | /* Shift the start of ex by 'map_len' blocks */ |
3470 | ex->ee_block = cpu_to_le32(ee_block + map_len); | |
3471 | ext4_ext_store_pblock(ex, ee_pblk + map_len); | |
3472 | ex->ee_len = cpu_to_le16(ee_len - map_len); | |
556615dc | 3473 | ext4_ext_mark_unwritten(ex); /* Restore the flag */ |
6f91bc5f | 3474 | |
bc2d9db4 LC |
3475 | /* Extend abut_ex by 'map_len' blocks */ |
3476 | abut_ex->ee_len = cpu_to_le16(prev_len + map_len); | |
6f91bc5f | 3477 | |
bc2d9db4 LC |
3478 | /* Result: number of initialized blocks past m_lblk */ |
3479 | allocated = map_len; | |
3480 | } | |
3481 | } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && | |
3482 | (map_len < ee_len) && /*L1*/ | |
3483 | ex < EXT_LAST_EXTENT(eh)) { /*L2*/ | |
3484 | /* See if we can merge right */ | |
3485 | ext4_lblk_t next_lblk; | |
3486 | ext4_fsblk_t next_pblk, ee_pblk; | |
3487 | unsigned int next_len; | |
3488 | ||
3489 | abut_ex = ex + 1; | |
3490 | next_lblk = le32_to_cpu(abut_ex->ee_block); | |
3491 | next_len = ext4_ext_get_actual_len(abut_ex); | |
3492 | next_pblk = ext4_ext_pblock(abut_ex); | |
3493 | ee_pblk = ext4_ext_pblock(ex); | |
6f91bc5f | 3494 | |
bc2d9db4 LC |
3495 | /* |
3496 | * A transfer of blocks from 'ex' to 'abut_ex' is allowed | |
3497 | * upon those conditions: | |
3498 | * - C1: abut_ex is initialized, | |
3499 | * - C2: abut_ex is logically abutting ex, | |
3500 | * - C3: abut_ex is physically abutting ex, | |
3501 | * - C4: abut_ex can receive the additional blocks without | |
3502 | * overflowing the (initialized) length limit. | |
3503 | */ | |
556615dc | 3504 | if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ |
bc2d9db4 LC |
3505 | ((map->m_lblk + map_len) == next_lblk) && /*C2*/ |
3506 | ((ee_pblk + ee_len) == next_pblk) && /*C3*/ | |
3507 | (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ | |
3508 | err = ext4_ext_get_access(handle, inode, path + depth); | |
3509 | if (err) | |
3510 | goto out; | |
3511 | ||
3512 | trace_ext4_ext_convert_to_initialized_fastpath(inode, | |
3513 | map, ex, abut_ex); | |
3514 | ||
3515 | /* Shift the start of abut_ex by 'map_len' blocks */ | |
3516 | abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); | |
3517 | ext4_ext_store_pblock(abut_ex, next_pblk - map_len); | |
3518 | ex->ee_len = cpu_to_le16(ee_len - map_len); | |
556615dc | 3519 | ext4_ext_mark_unwritten(ex); /* Restore the flag */ |
bc2d9db4 LC |
3520 | |
3521 | /* Extend abut_ex by 'map_len' blocks */ | |
3522 | abut_ex->ee_len = cpu_to_le16(next_len + map_len); | |
6f91bc5f EG |
3523 | |
3524 | /* Result: number of initialized blocks past m_lblk */ | |
bc2d9db4 | 3525 | allocated = map_len; |
6f91bc5f EG |
3526 | } |
3527 | } | |
bc2d9db4 LC |
3528 | if (allocated) { |
3529 | /* Mark the block containing both extents as dirty */ | |
b60ca334 | 3530 | err = ext4_ext_dirty(handle, inode, path + depth); |
bc2d9db4 LC |
3531 | |
3532 | /* Update path to point to the right extent */ | |
3533 | path[depth].p_ext = abut_ex; | |
3534 | goto out; | |
3535 | } else | |
3536 | allocated = ee_len - (map->m_lblk - ee_block); | |
6f91bc5f | 3537 | |
667eff35 | 3538 | WARN_ON(map->m_lblk < ee_block); |
21ca087a DM |
3539 | /* |
3540 | * It is safe to convert extent to initialized via explicit | |
9e740568 | 3541 | * zeroout only if extent is fully inside i_size or new_size. |
21ca087a | 3542 | */ |
667eff35 | 3543 | split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; |
21ca087a | 3544 | |
67a5da56 ZL |
3545 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) |
3546 | max_zeroout = sbi->s_extent_max_zeroout_kb >> | |
4f42f80a | 3547 | (inode->i_sb->s_blocksize_bits - 10); |
67a5da56 | 3548 | |
56055d3a | 3549 | /* |
4f8caa60 | 3550 | * five cases: |
667eff35 | 3551 | * 1. split the extent into three extents. |
4f8caa60 JK |
3552 | * 2. split the extent into two extents, zeroout the head of the first |
3553 | * extent. | |
3554 | * 3. split the extent into two extents, zeroout the tail of the second | |
3555 | * extent. | |
667eff35 | 3556 | * 4. split the extent into two extents without zeroout. |
4f8caa60 JK |
3557 | * 5. no splitting needed, just possibly zeroout the head and / or the |
3558 | * tail of the extent. | |
56055d3a | 3559 | */ |
667eff35 YY |
3560 | split_map.m_lblk = map->m_lblk; |
3561 | split_map.m_len = map->m_len; | |
3562 | ||
4f8caa60 | 3563 | if (max_zeroout && (allocated > split_map.m_len)) { |
67a5da56 | 3564 | if (allocated <= max_zeroout) { |
4f8caa60 JK |
3565 | /* case 3 or 5 */ |
3566 | zero_ex1.ee_block = | |
3567 | cpu_to_le32(split_map.m_lblk + | |
3568 | split_map.m_len); | |
3569 | zero_ex1.ee_len = | |
3570 | cpu_to_le16(allocated - split_map.m_len); | |
3571 | ext4_ext_store_pblock(&zero_ex1, | |
3572 | ext4_ext_pblock(ex) + split_map.m_lblk + | |
3573 | split_map.m_len - ee_block); | |
3574 | err = ext4_ext_zeroout(inode, &zero_ex1); | |
56055d3a | 3575 | if (err) |
308c57cc | 3576 | goto fallback; |
667eff35 | 3577 | split_map.m_len = allocated; |
4f8caa60 JK |
3578 | } |
3579 | if (split_map.m_lblk - ee_block + split_map.m_len < | |
3580 | max_zeroout) { | |
3581 | /* case 2 or 5 */ | |
3582 | if (split_map.m_lblk != ee_block) { | |
3583 | zero_ex2.ee_block = ex->ee_block; | |
3584 | zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - | |
667eff35 | 3585 | ee_block); |
4f8caa60 | 3586 | ext4_ext_store_pblock(&zero_ex2, |
667eff35 | 3587 | ext4_ext_pblock(ex)); |
4f8caa60 | 3588 | err = ext4_ext_zeroout(inode, &zero_ex2); |
667eff35 | 3589 | if (err) |
308c57cc | 3590 | goto fallback; |
667eff35 YY |
3591 | } |
3592 | ||
4f8caa60 | 3593 | split_map.m_len += split_map.m_lblk - ee_block; |
667eff35 | 3594 | split_map.m_lblk = ee_block; |
9b940f8e | 3595 | allocated = map->m_len; |
56055d3a AA |
3596 | } |
3597 | } | |
667eff35 | 3598 | |
308c57cc | 3599 | fallback: |
ae9e9c6a JK |
3600 | err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, |
3601 | flags); | |
3602 | if (err > 0) | |
3603 | err = 0; | |
56055d3a | 3604 | out: |
adb23551 | 3605 | /* If we have gotten a failure, don't zero out status tree */ |
4f8caa60 JK |
3606 | if (!err) { |
3607 | err = ext4_zeroout_es(inode, &zero_ex1); | |
3608 | if (!err) | |
3609 | err = ext4_zeroout_es(inode, &zero_ex2); | |
3610 | } | |
56055d3a AA |
3611 | return err ? err : allocated; |
3612 | } | |
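/*
 * Worked example for the max_zeroout limit used above (illustrative):
 * with s_extent_max_zeroout_kb = 32 and 4K blocks (blocksize_bits =
 * 12), max_zeroout = 32 >> (12 - 10) = 8 blocks, so, roughly, an
 * unwritten extent is zeroed out instead of split only when at most
 * 8 blocks would have to be written.
 */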
3613 | ||
0031462b | 3614 | /* |
e35fd660 | 3615 | * This function is called by ext4_ext_map_blocks() from |
0031462b | 3616 | * ext4_get_blocks_dio_write() when DIO is used to write |
556615dc | 3617 | * to an unwritten extent. |
0031462b | 3618 | * |
556615dc LC |
3619 | * Writing to an unwritten extent may result in splitting the unwritten |
3620 | * extent into multiple initialized/unwritten extents (up to three) | |
0031462b | 3621 | * There are three possibilities: |
556615dc | 3622 | * a> There is no split required: Entire extent should be unwritten |
0031462b MC |
3623 | * b> Splits in two extents: Write is happening at either end of the extent |
3624 | * c> Splits in three extents: Someone is writing in the middle of the extent | |
3625 | * | |
b8a86845 LC |
3626 | * This works the same way in the case of initialized -> unwritten conversion. |
3627 | * | |
0031462b | 3628 | * One or more index blocks may be needed if the extent tree grows after |
556615dc LC |
3629 | * the unwritten extent is split. To prevent ENOSPC from occurring when the | |
3630 | * IO completes, we need to split the unwritten extent before submitting | |
3631 | * the IO. The unwritten extent will be split | |
3632 | * into at most three unwritten extents. After IO completion, the part | |
0031462b MC |
3633 | * being filled will be converted to initialized by the end_io callback |
3634 | * via ext4_convert_unwritten_extents(). |
ba230c3f | 3635 | * |
556615dc | 3636 | * Returns the size of unwritten extent to be written on success. |
0031462b | 3637 | */ |
b8a86845 | 3638 | static int ext4_split_convert_extents(handle_t *handle, |
0031462b | 3639 | struct inode *inode, |
e35fd660 | 3640 | struct ext4_map_blocks *map, |
dfe50809 | 3641 | struct ext4_ext_path **ppath, |
0031462b MC |
3642 | int flags) |
3643 | { | |
dfe50809 | 3644 | struct ext4_ext_path *path = *ppath; |
667eff35 YY |
3645 | ext4_lblk_t eof_block; |
3646 | ext4_lblk_t ee_block; | |
3647 | struct ext4_extent *ex; | |
3648 | unsigned int ee_len; | |
3649 | int split_flag = 0, depth; | |
21ca087a | 3650 | |
70aa1554 | 3651 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
b8a86845 | 3652 | (unsigned long long)map->m_lblk, map->m_len); |
21ca087a | 3653 | |
801674f3 JK |
3654 | eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) |
3655 | >> inode->i_sb->s_blocksize_bits; | |
e35fd660 TT |
3656 | if (eof_block < map->m_lblk + map->m_len) |
3657 | eof_block = map->m_lblk + map->m_len; | |
21ca087a DM |
3658 | /* |
3659 | * It is safe to convert extent to initialized via explicit | |
e4d7f2d3 | 3660 | * zeroout only if extent is fully inside i_size or new_size. |
21ca087a | 3661 | */ |
667eff35 YY |
3662 | depth = ext_depth(inode); |
3663 | ex = path[depth].p_ext; | |
3664 | ee_block = le32_to_cpu(ex->ee_block); | |
3665 | ee_len = ext4_ext_get_actual_len(ex); | |
0031462b | 3666 | |
b8a86845 LC |
3667 | /* Convert to unwritten */ |
3668 | if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { | |
3669 | split_flag |= EXT4_EXT_DATA_VALID1; | |
3670 | /* Convert to initialized */ | |
3671 | } else if (flags & EXT4_GET_BLOCKS_CONVERT) { | |
3672 | split_flag |= ee_block + ee_len <= eof_block ? | |
3673 | EXT4_EXT_MAY_ZEROOUT : 0; | |
556615dc | 3674 | split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); |
b8a86845 | 3675 | } |
667eff35 | 3676 | flags |= EXT4_GET_BLOCKS_PRE_IO; |
dfe50809 | 3677 | return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); |
0031462b | 3678 | } |
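/*
 * Worked example for the eof_block round-up above (illustrative):
 * with i_disksize = 10000 bytes and 4K blocks, eof_block =
 * (10000 + 4096 - 1) >> 12 = 3, so an extent with
 * ee_block + ee_len <= 3 lies fully inside i_size and may be
 * zeroed out safely.
 */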
197217a5 | 3679 | |
c7064ef1 | 3680 | static int ext4_convert_unwritten_extents_endio(handle_t *handle, |
dee1f973 DM |
3681 | struct inode *inode, |
3682 | struct ext4_map_blocks *map, | |
dfe50809 | 3683 | struct ext4_ext_path **ppath) |
0031462b | 3684 | { |
dfe50809 | 3685 | struct ext4_ext_path *path = *ppath; |
0031462b | 3686 | struct ext4_extent *ex; |
dee1f973 DM |
3687 | ext4_lblk_t ee_block; |
3688 | unsigned int ee_len; | |
0031462b MC |
3689 | int depth; |
3690 | int err = 0; | |
0031462b MC |
3691 | |
3692 | depth = ext_depth(inode); | |
0031462b | 3693 | ex = path[depth].p_ext; |
dee1f973 DM |
3694 | ee_block = le32_to_cpu(ex->ee_block); |
3695 | ee_len = ext4_ext_get_actual_len(ex); | |
0031462b | 3696 | |
70aa1554 | 3697 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
dee1f973 DM |
3698 | (unsigned long long)ee_block, ee_len); |
3699 | ||
ff95ec22 DM |
3700 | /* If extent is larger than requested it is a clear sign that we still |
3701 | * have some extent state machine issues left. So extent_split is still | |
3702 | * required. | |
3703 | * TODO: Once all related issues will be fixed this situation should be | |
3704 | * illegal. | |
3705 | */ | |
dee1f973 | 3706 | if (ee_block != map->m_lblk || ee_len > map->m_len) { |
e3d550c2 RP |
3707 | #ifdef CONFIG_EXT4_DEBUG |
3708 | ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," | |
8d2ae1cb | 3709 | " len %u; IO logical block %llu, len %u", |
ff95ec22 DM |
3710 | inode->i_ino, (unsigned long long)ee_block, ee_len, |
3711 | (unsigned long long)map->m_lblk, map->m_len); | |
3712 | #endif | |
dfe50809 | 3713 | err = ext4_split_convert_extents(handle, inode, map, ppath, |
b8a86845 | 3714 | EXT4_GET_BLOCKS_CONVERT); |
dee1f973 | 3715 | if (err < 0) |
dfe50809 | 3716 | return err; |
ed8a1a76 | 3717 | path = ext4_find_extent(inode, map->m_lblk, ppath, 0); |
dfe50809 TT |
3718 | if (IS_ERR(path)) |
3719 | return PTR_ERR(path); | |
dee1f973 DM |
3720 | depth = ext_depth(inode); |
3721 | ex = path[depth].p_ext; | |
3722 | } | |
197217a5 | 3723 | |
0031462b MC |
3724 | err = ext4_ext_get_access(handle, inode, path + depth); |
3725 | if (err) | |
3726 | goto out; | |
3727 | /* first mark the extent as initialized */ | |
3728 | ext4_ext_mark_initialized(ex); | |
3729 | ||
197217a5 YY |
3730 | /* note: ext4_ext_correct_indexes() isn't needed here because |
3731 | * borders are not changed | |
0031462b | 3732 | */ |
ecb94f5f | 3733 | ext4_ext_try_to_merge(handle, inode, path, ex); |
197217a5 | 3734 | |
0031462b | 3735 | /* Mark modified extent as dirty */ |
ecb94f5f | 3736 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
0031462b MC |
3737 | out: |
3738 | ext4_ext_show_leaf(inode, path); | |
3739 | return err; | |
3740 | } | |
3741 | ||
b8a86845 | 3742 | static int |
e8b83d93 TT |
3743 | convert_initialized_extent(handle_t *handle, struct inode *inode, |
3744 | struct ext4_map_blocks *map, | |
29c6eaff | 3745 | struct ext4_ext_path **ppath, |
f064a9d6 | 3746 | unsigned int *allocated) |
b8a86845 | 3747 | { |
4f224b8b | 3748 | struct ext4_ext_path *path = *ppath; |
e8b83d93 TT |
3749 | struct ext4_extent *ex; |
3750 | ext4_lblk_t ee_block; | |
3751 | unsigned int ee_len; | |
3752 | int depth; | |
b8a86845 LC |
3753 | int err = 0; |
3754 | ||
3755 | /* | |
3756 | * Make sure that the extent is no bigger than we support with | |
556615dc | 3757 | * unwritten extent |
b8a86845 | 3758 | */ |
556615dc LC |
3759 | if (map->m_len > EXT_UNWRITTEN_MAX_LEN) |
3760 | map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; | |
b8a86845 | 3761 | |
e8b83d93 TT |
3762 | depth = ext_depth(inode); |
3763 | ex = path[depth].p_ext; | |
3764 | ee_block = le32_to_cpu(ex->ee_block); | |
3765 | ee_len = ext4_ext_get_actual_len(ex); | |
3766 | ||
70aa1554 | 3767 | ext_debug(inode, "logical block %llu, max_blocks %u\n", |
e8b83d93 TT |
3768 | (unsigned long long)ee_block, ee_len); |
3769 | ||
3770 | if (ee_block != map->m_lblk || ee_len > map->m_len) { | |
dfe50809 | 3771 | err = ext4_split_convert_extents(handle, inode, map, ppath, |
e8b83d93 TT |
3772 | EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); |
3773 | if (err < 0) | |
3774 | return err; | |
ed8a1a76 | 3775 | path = ext4_find_extent(inode, map->m_lblk, ppath, 0); |
e8b83d93 TT |
3776 | if (IS_ERR(path)) |
3777 | return PTR_ERR(path); | |
3778 | depth = ext_depth(inode); | |
3779 | ex = path[depth].p_ext; | |
3780 | if (!ex) { | |
3781 | EXT4_ERROR_INODE(inode, "unexpected hole at %lu", | |
3782 | (unsigned long) map->m_lblk); | |
6a797d27 | 3783 | return -EFSCORRUPTED; |
e8b83d93 TT |
3784 | } |
3785 | } | |
3786 | ||
3787 | err = ext4_ext_get_access(handle, inode, path + depth); | |
3788 | if (err) | |
3789 | return err; | |
3790 | /* first mark the extent as unwritten */ | |
3791 | ext4_ext_mark_unwritten(ex); | |
3792 | ||
3793 | /* note: ext4_ext_correct_indexes() isn't needed here because | |
3794 | * borders are not changed | |
3795 | */ | |
3796 | ext4_ext_try_to_merge(handle, inode, path, ex); | |
3797 | ||
3798 | /* Mark modified extent as dirty */ | |
3799 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); | |
3800 | if (err) | |
3801 | return err; | |
3802 | ext4_ext_show_leaf(inode, path); | |
3803 | ||
3804 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
4337ecd1 | 3805 | |
b8a86845 | 3806 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
f064a9d6 EW |
3807 | if (*allocated > map->m_len) |
3808 | *allocated = map->m_len; | |
3809 | map->m_len = *allocated; | |
3810 | return 0; | |
b8a86845 LC |
3811 | } |
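/*
 * Worked example for the length clamp in convert_initialized_extent()
 * above (illustrative; in mainline EXT_UNWRITTEN_MAX_LEN is 32767,
 * one less than EXT_INIT_MAX_LEN because the high bit of ee_len marks
 * the extent unwritten): a 40000-block request is cut back to
 * 32767 / 2 = 16383 blocks so the converted extent can never overflow
 * the unwritten length encoding.
 */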
3812 | ||
0031462b | 3813 | static int |
556615dc | 3814 | ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, |
e35fd660 | 3815 | struct ext4_map_blocks *map, |
dfe50809 | 3816 | struct ext4_ext_path **ppath, int flags, |
e35fd660 | 3817 | unsigned int allocated, ext4_fsblk_t newblock) |
0031462b | 3818 | { |
8ec2d31b | 3819 | struct ext4_ext_path __maybe_unused *path = *ppath; |
0031462b MC |
3820 | int ret = 0; |
3821 | int err = 0; | |
3822 | ||
70aa1554 RH |
3823 | ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", |
3824 | (unsigned long long)map->m_lblk, map->m_len, flags, | |
3825 | allocated); | |
0031462b MC |
3826 | ext4_ext_show_leaf(inode, path); |
3827 | ||
27dd4385 | 3828 | /* |
556615dc | 3829 | * When writing into unwritten space, we should not fail to |
27dd4385 LC |
3830 | * allocate metadata blocks for the new extent block if needed. |
3831 | */ | |
3832 | flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; | |
3833 | ||
556615dc | 3834 | trace_ext4_ext_handle_unwritten_extents(inode, map, flags, |
b5645534 | 3835 | allocated, newblock); |
d8990240 | 3836 | |
779e2651 | 3837 | /* get_block() before submitting IO, split the extent */ |
c8b459f4 | 3838 | if (flags & EXT4_GET_BLOCKS_PRE_IO) { |
dfe50809 TT |
3839 | ret = ext4_split_convert_extents(handle, inode, map, ppath, |
3840 | flags | EXT4_GET_BLOCKS_CONVERT); | |
779e2651 EW |
3841 | if (ret < 0) { |
3842 | err = ret; | |
3843 | goto out2; | |
3844 | } | |
3845 | /* | |
3846 | * shouldn't get a 0 return when splitting an extent unless | |
3847 | * m_len is 0 (bug) or extent has been corrupted | |
3848 | */ | |
3849 | if (unlikely(ret == 0)) { | |
3850 | EXT4_ERROR_INODE(inode, | |
3851 | "unexpected ret == 0, m_len = %u", | |
3852 | map->m_len); | |
3853 | err = -EFSCORRUPTED; | |
3854 | goto out2; | |
3855 | } | |
a25a4e1a | 3856 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
0031462b MC |
3857 | goto out; |
3858 | } | |
c7064ef1 | 3859 | /* IO end_io complete, convert the filled extent to written */ |
c8b459f4 | 3860 | if (flags & EXT4_GET_BLOCKS_CONVERT) { |
bee6cf00 | 3861 | err = ext4_convert_unwritten_extents_endio(handle, inode, map, |
dfe50809 | 3862 | ppath); |
bee6cf00 EW |
3863 | if (err < 0) |
3864 | goto out2; | |
3865 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
3866 | goto map_out; | |
0031462b | 3867 | } |
bee6cf00 | 3868 | /* buffered IO cases */ |
0031462b MC |
3869 | /* |
3870 | * repeat fallocate creation request | |
3871 | * we already have an unwritten extent | |
3872 | */ | |
556615dc | 3873 | if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { |
a25a4e1a | 3874 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
0031462b | 3875 | goto map_out; |
a25a4e1a | 3876 | } |
0031462b MC |
3877 | |
3878 | /* buffered READ or buffered write_begin() lookup */ | |
3879 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | |
3880 | /* | |
3881 | * We have blocks reserved already. We | |
3882 | * return allocated blocks so that delalloc | |
3883 | * won't do block reservation for us. But | |
3884 | * the buffer head will be unmapped so that | |
3885 | * a read from the block returns 0s. | |
3886 | */ | |
e35fd660 | 3887 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
0031462b MC |
3888 | goto out1; |
3889 | } | |
3890 | ||
be809e12 EW |
3891 | /* |
3892 | * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1. | |
3893 | * For buffered writes, at writepage time, etc. Convert a | |
3894 | * discovered unwritten extent to written. | |
3895 | */ | |
dfe50809 | 3896 | ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); |
be809e12 | 3897 | if (ret < 0) { |
0031462b MC |
3898 | err = ret; |
3899 | goto out2; | |
779e2651 | 3900 | } |
be809e12 EW |
3901 | ext4_update_inode_fsync_trans(handle, inode, 1); |
3902 | /* | |
3903 | * shouldn't get a 0 return when converting an unwritten extent | |
3904 | * unless m_len is 0 (bug) or extent has been corrupted | |
3905 | */ | |
3906 | if (unlikely(ret == 0)) { | |
3907 | EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u", | |
3908 | map->m_len); | |
3909 | err = -EFSCORRUPTED; | |
3910 | goto out2; | |
3911 | } | |
3912 | ||
779e2651 EW |
3913 | out: |
3914 | allocated = ret; | |
e35fd660 | 3915 | map->m_flags |= EXT4_MAP_NEW; |
0031462b | 3916 | map_out: |
e35fd660 | 3917 | map->m_flags |= EXT4_MAP_MAPPED; |
0031462b | 3918 | out1: |
bee6cf00 | 3919 | map->m_pblk = newblock; |
e35fd660 TT |
3920 | if (allocated > map->m_len) |
3921 | allocated = map->m_len; | |
e35fd660 | 3922 | map->m_len = allocated; |
bee6cf00 | 3923 | ext4_ext_show_leaf(inode, path); |
0031462b | 3924 | out2: |
0031462b MC |
3925 | return err ? err : allocated; |
3926 | } | |
58590b06 | 3927 | |
4d33b1ef TT |
3928 | /* |
3929 | * get_implied_cluster_alloc - check to see if the requested | |
3930 | * allocation (in the map structure) overlaps with a cluster already | |
3931 | * allocated in an extent. | |
d8990240 | 3932 | * @sb The filesystem superblock structure |
4d33b1ef TT |
3933 | * @map The requested lblk->pblk mapping |
3934 | * @ex The extent structure which might contain an implied | |
3935 | * cluster allocation | |
3936 | * | |
3937 | * This function is called by ext4_ext_map_blocks() after we failed to | |
3938 | * find blocks that were already in the inode's extent tree. Hence, | |
3939 | * we know that the beginning of the requested region cannot overlap | |
3940 | * the extent from the inode's extent tree. There are three cases we | |
3941 | * want to catch. The first is this case: | |
3942 | * | |
3943 | * |--- cluster # N--| | |
3944 | * |--- extent ---| |---- requested region ---| | |
3945 | * |==========| | |
3946 | * | |
3947 | * The second case that we need to test for is this one: | |
3948 | * | |
3949 | * |--------- cluster # N ----------------| | |
3950 | * |--- requested region --| |------- extent ----| | |
3951 | * |=======================| | |
3952 | * | |
3953 | * The third case is when the requested region lies between two extents | |
3954 | * within the same cluster: | |
3955 | * |------------- cluster # N-------------| | |
3956 | * |----- ex -----| |---- ex_right ----| | |
3957 | * |------ requested region ------| | |
3958 | * |================| | |
3959 | * | |
3960 | * In each of the above cases, we need to set the map->m_pblk and | |
3961 | * map->m_len so that they correspond to the extent labelled as | |
3962 | * "|====|" from cluster #N, since it is already in use for data in | |
3963 | * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to | |
3964 | * signal to ext4_ext_map_blocks() that map->m_pblk should be treated | |
3965 | * as a new "allocated" block region. Otherwise, we will return 0 and | |
3966 | * ext4_ext_map_blocks() will then allocate one or more new clusters | |
3967 | * by calling ext4_mb_new_blocks(). | |
3968 | */ | |
d8990240 | 3969 | static int get_implied_cluster_alloc(struct super_block *sb, |
4d33b1ef TT |
3970 | struct ext4_map_blocks *map, |
3971 | struct ext4_extent *ex, | |
3972 | struct ext4_ext_path *path) | |
3973 | { | |
d8990240 | 3974 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
f5a44db5 | 3975 | ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4d33b1ef | 3976 | ext4_lblk_t ex_cluster_start, ex_cluster_end; |
14d7f3ef | 3977 | ext4_lblk_t rr_cluster_start; |
4d33b1ef TT |
3978 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
3979 | ext4_fsblk_t ee_start = ext4_ext_pblock(ex); | |
3980 | unsigned short ee_len = ext4_ext_get_actual_len(ex); | |
3981 | ||
3982 | /* The extent passed in that we are trying to match */ | |
3983 | ex_cluster_start = EXT4_B2C(sbi, ee_block); | |
3984 | ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); | |
3985 | ||
3986 | /* The requested region passed into ext4_map_blocks() */ | |
3987 | rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); | |
4d33b1ef TT |
3988 | |
3989 | if ((rr_cluster_start == ex_cluster_end) || | |
3990 | (rr_cluster_start == ex_cluster_start)) { | |
3991 | if (rr_cluster_start == ex_cluster_end) | |
3992 | ee_start += ee_len - 1; | |
f5a44db5 | 3993 | map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; |
4d33b1ef TT |
3994 | map->m_len = min(map->m_len, |
3995 | (unsigned) sbi->s_cluster_ratio - c_offset); | |
3996 | /* | |
3997 | * Check for and handle this case: | |
3998 | * | |
3999 | * |--------- cluster # N-------------| | |
4000 | * |------- extent ----| | |
4001 | * |--- requested region ---| | |
4002 | * |===========| | |
4003 | */ | |
4004 | ||
4005 | if (map->m_lblk < ee_block) | |
4006 | map->m_len = min(map->m_len, ee_block - map->m_lblk); | |
4007 | ||
4008 | /* | |
4009 | * Check for the case where there is already another allocated | |
4010 | * block to the right of 'ex' but before the end of the cluster. | |
4011 | * | |
4012 | * |------------- cluster # N-------------| | |
4013 | * |----- ex -----| |---- ex_right ----| | |
4014 | * |------ requested region ------| | |
4015 | * |================| | |
4016 | */ | |
4017 | if (map->m_lblk > ee_block) { | |
4018 | ext4_lblk_t next = ext4_ext_next_allocated_block(path); | |
4019 | map->m_len = min(map->m_len, next - map->m_lblk); | |
4020 | } | |
d8990240 AK |
4021 | |
4022 | trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); | |
4d33b1ef TT |
4023 | return 1; |
4024 | } | |
d8990240 AK |
4025 | |
4026 | trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); | |
4d33b1ef TT |
4027 | return 0; |
4028 | } | |
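/*
 * Worked example for the implied-cluster math above (illustrative,
 * assuming a cluster ratio of 16): for a request at lblk 37,
 * EXT4_LBLK_COFF() gives 37 & 15 = 5 and the request lives in
 * cluster 37 >> 4 = 2; if case 1 applies, m_len is trimmed to at
 * most 16 - 5 = 11 blocks so the mapping never crosses out of the
 * already allocated cluster #2.
 */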
4029 | ||
4030 | ||
c278bfec | 4031 | /* |
f5ab0d1f MC |
4032 | * Block allocation/map/preallocation routine for extents based files |
4033 | * | |
4034 | * | |
c278bfec | 4035 | * Need to be called with |
0e855ac8 AK |
4036 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block |
4037 | * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) | |
f5ab0d1f | 4038 | * |
b483bb77 | 4039 | * return > 0, number of blocks already mapped/allocated |
f5ab0d1f MC |
4040 | * if create == 0 and these are pre-allocated blocks |
4041 | * buffer head is unmapped | |
4042 | * otherwise blocks are mapped | |
4043 | * | |
4044 | * return = 0, if plain lookup failed (blocks have not been allocated) | |
4045 | * buffer head is unmapped | |
4046 | * | |
4047 | * return < 0, error case. | |
c278bfec | 4048 | */ |
e35fd660 TT |
4049 | int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
4050 | struct ext4_map_blocks *map, int flags) | |
a86c6181 AT |
4051 | { |
4052 | struct ext4_ext_path *path = NULL; | |
d7dce9e0 | 4053 | struct ext4_extent newex, *ex, ex2; |
4d33b1ef | 4054 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
8ad8d710 | 4055 | ext4_fsblk_t newblock = 0, pblk; |
34990461 | 4056 | int err = 0, depth, ret; |
4d33b1ef | 4057 | unsigned int allocated = 0, offset = 0; |
81fdbb4a | 4058 | unsigned int allocated_clusters = 0; |
c9de560d | 4059 | struct ext4_allocation_request ar; |
4d33b1ef | 4060 | ext4_lblk_t cluster_offset; |
a86c6181 | 4061 | |
70aa1554 | 4062 | ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len); |
0562e0ba | 4063 | trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); |
a86c6181 | 4064 | |
a86c6181 | 4065 | /* find extent for this block */ |
ed8a1a76 | 4066 | path = ext4_find_extent(inode, map->m_lblk, NULL, 0); |
a86c6181 AT |
4067 | if (IS_ERR(path)) { |
4068 | err = PTR_ERR(path); | |
4069 | path = NULL; | |
8ad8d710 | 4070 | goto out; |
a86c6181 AT |
4071 | } |
4072 | ||
4073 | depth = ext_depth(inode); | |
4074 | ||
4075 | /* | |
d0d856e8 RD |
4076 | * consistent leaf must not be empty; |
4077 | * this situation is possible, though, _during_ tree modification; | |
ed8a1a76 | 4078 | * this is why assert can't be put in ext4_find_extent() |
a86c6181 | 4079 | */ |
273df556 FM |
4080 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { |
4081 | EXT4_ERROR_INODE(inode, "bad extent address " | |
f70f362b TT |
4082 | "lblock: %lu, depth: %d pblock %lld", |
4083 | (unsigned long) map->m_lblk, depth, | |
4084 | path[depth].p_block); | |
6a797d27 | 4085 | err = -EFSCORRUPTED; |
8ad8d710 | 4086 | goto out; |
034fb4c9 | 4087 | } |
a86c6181 | 4088 | |
7e028976 AM |
4089 | ex = path[depth].p_ext; |
4090 | if (ex) { | |
725d26d3 | 4091 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
bf89d16f | 4092 | ext4_fsblk_t ee_start = ext4_ext_pblock(ex); |
a2df2a63 | 4093 | unsigned short ee_len; |
471d4011 | 4094 | |
b8a86845 | 4095 | |
471d4011 | 4096 | /* |
556615dc | 4097 | * unwritten extents are treated as holes, except that |
56055d3a | 4098 | * we split out initialized portions during a write. |
471d4011 | 4099 | */ |
a2df2a63 | 4100 | ee_len = ext4_ext_get_actual_len(ex); |
d8990240 AK |
4101 | |
4102 | trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); | |
4103 | ||
d0d856e8 | 4104 | /* if found extent covers block, simply return it */ |
e35fd660 TT |
4105 | if (in_range(map->m_lblk, ee_block, ee_len)) { |
4106 | newblock = map->m_lblk - ee_block + ee_start; | |
d0d856e8 | 4107 | /* number of remaining blocks in the extent */ |
e35fd660 | 4108 | allocated = ee_len - (map->m_lblk - ee_block); |
70aa1554 RH |
4109 | ext_debug(inode, "%u fit into %u:%d -> %llu\n", |
4110 | map->m_lblk, ee_block, ee_len, newblock); | |
56055d3a | 4111 | |
b8a86845 LC |
4112 | /* |
4113 | * If the extent is initialized check whether the | |
4114 | * caller wants to convert it to unwritten. | |
4115 | */ | |
556615dc | 4116 | if ((!ext4_ext_is_unwritten(ex)) && |
b8a86845 | 4117 | (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { |
f064a9d6 EW |
4118 | err = convert_initialized_extent(handle, |
4119 | inode, map, &path, &allocated); | |
8ad8d710 | 4120 | goto out; |
f064a9d6 | 4121 | } else if (!ext4_ext_is_unwritten(ex)) { |
8ad8d710 EW |
4122 | map->m_flags |= EXT4_MAP_MAPPED; |
4123 | map->m_pblk = newblock; | |
4124 | if (allocated > map->m_len) | |
4125 | allocated = map->m_len; | |
4126 | map->m_len = allocated; | |
4127 | ext4_ext_show_leaf(inode, path); | |
7877191c | 4128 | goto out; |
f064a9d6 | 4129 | } |
69eb33dc | 4130 | |
556615dc | 4131 | ret = ext4_ext_handle_unwritten_extents( |
dfe50809 | 4132 | handle, inode, map, &path, flags, |
7877191c | 4133 | allocated, newblock); |
ce37c429 EW |
4134 | if (ret < 0) |
4135 | err = ret; | |
4136 | else | |
4137 | allocated = ret; | |
8ad8d710 | 4138 | goto out; |
a86c6181 AT |
4139 | } |
4140 | } | |
4141 | ||
4142 | /* | |
d0d856e8 | 4143 | * requested block isn't allocated yet; |
a86c6181 AT |
4144 | * we must not try to create blocks if the create flag is zero | |
4145 | */ | |
c2177057 | 4146 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
140a5250 JK |
4147 | ext4_lblk_t hole_start, hole_len; |
4148 | ||
facab4d9 JK |
4149 | hole_start = map->m_lblk; |
4150 | hole_len = ext4_ext_determine_hole(inode, path, &hole_start); | |
56055d3a AA |
4151 | /* |
4152 | * put just found gap into cache to speed up | |
4153 | * subsequent requests | |
4154 | */ | |
140a5250 | 4155 | ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); |
facab4d9 JK |
4156 | |
4157 | /* Update hole_len to reflect hole size after map->m_lblk */ | |
4158 | if (hole_start != map->m_lblk) | |
4159 | hole_len -= map->m_lblk - hole_start; | |
4160 | map->m_pblk = 0; | |
4161 | map->m_len = min_t(unsigned int, map->m_len, hole_len); | |
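/*
 * Illustrative example (assuming ext4_ext_determine_hole() returns the
 * start and length of the hole containing the requested block): if
 * blocks 100..199 form a hole and map->m_lblk == 120, hole_start
 * becomes 100 and hole_len 100. The whole gap is cached above, then
 * hole_len is trimmed to the 80 blocks remaining after m_lblk so that
 * map->m_len never extends past the end of the hole.
 */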
4162 | ||
8ad8d710 | 4163 | goto out; |
a86c6181 | 4164 | } |
4d33b1ef | 4165 | |
a86c6181 | 4166 | /* |
c2ea3fde | 4167 | * Okay, we need to do block allocation. |
63f57933 | 4168 | */ |
4d33b1ef | 4169 | newex.ee_block = cpu_to_le32(map->m_lblk); |
d0abafac | 4170 | cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4d33b1ef TT |
4171 | |
4172 | /* | |
4173 | * If we are doing bigalloc, check to see if the extent returned | |
ed8a1a76 | 4174 | * by ext4_find_extent() implies a cluster we can use. |
4d33b1ef TT |
4175 | */ |
4176 | if (cluster_offset && ex && | |
d8990240 | 4177 | get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { |
4d33b1ef TT |
4178 | ar.len = allocated = map->m_len; |
4179 | newblock = map->m_pblk; | |
4180 | goto got_allocated_blocks; | |
4181 | } | |
a86c6181 | 4182 | |
c9de560d | 4183 | /* find neighbour allocated blocks */ |
e35fd660 | 4184 | ar.lleft = map->m_lblk; |
c9de560d AT |
4185 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); |
4186 | if (err) | |
8ad8d710 | 4187 | goto out; |
e35fd660 | 4188 | ar.lright = map->m_lblk; |
4d33b1ef | 4189 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); |
d7dce9e0 | 4190 | if (err < 0) |
8ad8d710 | 4191 | goto out; |
25d14f98 | 4192 | |
4d33b1ef TT |
4193 | /* Check if the extent after searching to the right implies a |
4194 | * cluster we can use. */ | |
d7dce9e0 | 4195 | if ((sbi->s_cluster_ratio > 1) && err && |
4196 | get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) { | |
4d33b1ef TT |
4197 | ar.len = allocated = map->m_len; |
4198 | newblock = map->m_pblk; | |
4199 | goto got_allocated_blocks; | |
4200 | } | |
4201 | ||
749269fa AA |
4202 | /* |
4203 | * See if request is beyond maximum number of blocks we can have in | |
4204 | * a single extent. For an initialized extent this limit is | |
556615dc LC |
4205 | * EXT_INIT_MAX_LEN and for an unwritten extent this limit is |
4206 | * EXT_UNWRITTEN_MAX_LEN. | |
749269fa | 4207 | */ |
e35fd660 | 4208 | if (map->m_len > EXT_INIT_MAX_LEN && |
556615dc | 4209 | !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) |
e35fd660 | 4210 | map->m_len = EXT_INIT_MAX_LEN; |
556615dc LC |
4211 | else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && |
4212 | (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) | |
4213 | map->m_len = EXT_UNWRITTEN_MAX_LEN; | |
749269fa | 4214 | |
e35fd660 | 4215 | /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ |
e35fd660 | 4216 | newex.ee_len = cpu_to_le16(map->m_len); |
4d33b1ef | 4217 | err = ext4_ext_check_overlap(sbi, inode, &newex, path); |
25d14f98 | 4218 | if (err) |
b939e376 | 4219 | allocated = ext4_ext_get_actual_len(&newex); |
25d14f98 | 4220 | else |
e35fd660 | 4221 | allocated = map->m_len; |
c9de560d AT |
4222 | |
4223 | /* allocate new block */ | |
4224 | ar.inode = inode; | |
e35fd660 TT |
4225 | ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); |
4226 | ar.logical = map->m_lblk; | |
4d33b1ef TT |
4227 | /* |
4228 | * We calculate the offset from the beginning of the cluster | |
4229 | * for the logical block number, since when we allocate a | |
4230 | * physical cluster, the physical block should start at the | |
4231 | * same offset from the beginning of the cluster. This is | |
4232 | * needed so that future calls to get_implied_cluster_alloc() | |
4233 | * work correctly. | |
4234 | */ | |
f5a44db5 | 4235 | offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4d33b1ef TT |
4236 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); |
4237 | ar.goal -= offset; | |
4238 | ar.logical -= offset; | |
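/*
 * Worked example (assuming bigalloc with a 16-block cluster, i.e.
 * s_cluster_ratio == 16): for map->m_lblk == 36 and allocated == 10,
 * EXT4_LBLK_COFF() gives offset == 4, EXT4_NUM_B2C(4 + 10) rounds up
 * to 1 cluster, and goal/logical are pulled back by 4 blocks so the
 * allocation request starts on the cluster boundary (block 32).
 */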
c9de560d AT |
4239 | if (S_ISREG(inode->i_mode)) |
4240 | ar.flags = EXT4_MB_HINT_DATA; | |
4241 | else | |
4242 | /* disable in-core preallocation for non-regular files */ | |
4243 | ar.flags = 0; | |
556b27ab VH |
4244 | if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) |
4245 | ar.flags |= EXT4_MB_HINT_NOPREALLOC; | |
e3cf5d5d TT |
4246 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
4247 | ar.flags |= EXT4_MB_DELALLOC_RESERVED; | |
c5e298ae TT |
4248 | if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) |
4249 | ar.flags |= EXT4_MB_USE_RESERVED; | |
c9de560d | 4250 | newblock = ext4_mb_new_blocks(handle, &ar, &err); |
a86c6181 | 4251 | if (!newblock) |
8ad8d710 | 4252 | goto out; |
7b415bf6 | 4253 | allocated_clusters = ar.len; |
4d33b1ef | 4254 | ar.len = EXT4_C2B(sbi, ar.len) - offset; |
70aa1554 | 4255 | ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n", |
ec8c60be | 4256 | ar.goal, newblock, ar.len, allocated); |
4d33b1ef TT |
4257 | if (ar.len > allocated) |
4258 | ar.len = allocated; | |
a86c6181 | 4259 | |
4d33b1ef | 4260 | got_allocated_blocks: |
a86c6181 | 4261 | /* try to insert new extent into found leaf and return */ |
8ad8d710 EW |
4262 | pblk = newblock + offset; |
4263 | ext4_ext_store_pblock(&newex, pblk); | |
c9de560d | 4264 | newex.ee_len = cpu_to_le16(ar.len); |
556615dc | 4265 | /* Mark unwritten */ |
34990461 | 4266 | if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { |
556615dc | 4267 | ext4_ext_mark_unwritten(&newex); |
a25a4e1a | 4268 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
8d5d02e6 | 4269 | } |
c8d46e41 | 4270 | |
4337ecd1 | 4271 | err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); |
34990461 EW |
4272 | if (err) { |
4273 | if (allocated_clusters) { | |
4274 | int fb_flags = 0; | |
82e54229 | 4275 | |
34990461 EW |
4276 | /* |
4277 | * free data blocks we just allocated. | |
4278 | * not a good idea to call discard here directly, | |
4279 | * but otherwise we'd need to call it every free(). | |
4280 | */ | |
27bc446e | 4281 | ext4_discard_preallocations(inode, 0); |
34990461 EW |
4282 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
4283 | fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; | |
4284 | ext4_free_blocks(handle, inode, NULL, newblock, | |
4285 | EXT4_C2B(sbi, allocated_clusters), | |
4286 | fb_flags); | |
4287 | } | |
8ad8d710 | 4288 | goto out; |
315054f0 | 4289 | } |
a86c6181 | 4290 | |
5f634d06 | 4291 | /* |
b6bf9171 EW |
4292 | * Reduce the reserved cluster count to reflect successful deferred |
4293 | * allocation of delayed allocated clusters or direct allocation of | |
4294 | * clusters discovered to be delayed allocated. Once allocated, a | |
4295 | * cluster is not included in the reserved count. | |
5f634d06 | 4296 | */ |
2971148d | 4297 | if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) { |
b6bf9171 | 4298 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { |
232ec872 | 4299 | /* |
b6bf9171 EW |
4300 | * When allocating delayed allocated clusters, simply |
4301 | * reduce the reserved cluster count and claim quota | |
232ec872 LC |
4302 | */ |
4303 | ext4_da_update_reserve_space(inode, allocated_clusters, | |
4304 | 1); | |
b6bf9171 EW |
4305 | } else { |
4306 | ext4_lblk_t lblk, len; | |
4307 | unsigned int n; | |
4308 | ||
4309 | /* | |
4310 | * When allocating non-delayed allocated clusters | |
4311 | * (from fallocate, filemap, DIO, or clusters | |
4312 | * allocated when delalloc has been disabled by | |
4313 | * ext4_nonda_switch), reduce the reserved cluster | |
4314 | * count by the number of allocated clusters that | |
4315 | * have previously been delayed allocated. Quota | |
4316 | * has been claimed by ext4_mb_new_blocks() above, | |
4317 | * so release the quota reservations made for any | |
4318 | * previously delayed allocated clusters. | |
4319 | */ | |
4320 | lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); | |
4321 | len = allocated_clusters << sbi->s_cluster_bits; | |
4322 | n = ext4_es_delayed_clu(inode, lblk, len); | |
4323 | if (n > 0) | |
4324 | ext4_da_update_reserve_space(inode, (int) n, 0); | |
7b415bf6 AK |
4325 | } |
4326 | } | |
5f634d06 | 4327 | |
b436b9be JK |
4328 | /* |
4329 | * Cache the extent and update transaction to commit on fdatasync only | |
556615dc | 4330 | * when it is _not_ an unwritten extent. |
b436b9be | 4331 | */ |
556615dc | 4332 | if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) |
b436b9be | 4333 | ext4_update_inode_fsync_trans(handle, inode, 1); |
69eb33dc | 4334 | else |
b436b9be | 4335 | ext4_update_inode_fsync_trans(handle, inode, 0); |
8ad8d710 EW |
4336 | |
4337 | map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED); | |
4338 | map->m_pblk = pblk; | |
4339 | map->m_len = ar.len; | |
4340 | allocated = map->m_len; | |
a86c6181 | 4341 | ext4_ext_show_leaf(inode, path); |
8ad8d710 | 4342 | out: |
b7ea89ad TT |
4343 | ext4_ext_drop_refs(path); |
4344 | kfree(path); | |
e861304b | 4345 | |
63b99968 TT |
4346 | trace_ext4_ext_map_blocks_exit(inode, flags, map, |
4347 | err ? err : allocated); | |
7877191c | 4348 | return err ? err : allocated; |
a86c6181 AT |
4349 | } |
4350 | ||
d0abb36d | 4351 | int ext4_ext_truncate(handle_t *handle, struct inode *inode) |
a86c6181 | 4352 | { |
a86c6181 | 4353 | struct super_block *sb = inode->i_sb; |
725d26d3 | 4354 | ext4_lblk_t last_block; |
a86c6181 AT |
4355 | int err = 0; |
4356 | ||
a86c6181 | 4357 | /* |
d0d856e8 RD |
4358 | * TODO: optimization is possible here. |
4359 | * Probably we need not scan at all, | |
4360 | * because page truncation is enough. | |
a86c6181 | 4361 | */ |
a86c6181 AT |
4362 | |
4363 | /* we have to know where to truncate from in the crash case */ |
4364 | EXT4_I(inode)->i_disksize = inode->i_size; | |
d0abb36d TT |
4365 | err = ext4_mark_inode_dirty(handle, inode); |
4366 | if (err) | |
4367 | return err; | |
a86c6181 AT |
4368 | |
4369 | last_block = (inode->i_size + sb->s_blocksize - 1) | |
4370 | >> EXT4_BLOCK_SIZE_BITS(sb); | |
8acd5e9b | 4371 | retry: |
51865fda ZL |
4372 | err = ext4_es_remove_extent(inode, last_block, |
4373 | EXT_MAX_BLOCKS - last_block); | |
94eec0fc | 4374 | if (err == -ENOMEM) { |
8acd5e9b TT |
4375 | cond_resched(); |
4376 | congestion_wait(BLK_RW_ASYNC, HZ/50); | |
4377 | goto retry; | |
4378 | } | |
d0abb36d TT |
4379 | if (err) |
4380 | return err; | |
73c384c0 TT |
4381 | retry_remove_space: |
4382 | err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); | |
4383 | if (err == -ENOMEM) { | |
4384 | cond_resched(); | |
4385 | congestion_wait(BLK_RW_ASYNC, HZ/50); | |
4386 | goto retry_remove_space; | |
4387 | } | |
4388 | return err; | |
a86c6181 AT |
4389 | } |
4390 | ||
0e8b6879 | 4391 | static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, |
c174e6d6 | 4392 | ext4_lblk_t len, loff_t new_size, |
77a2e84d | 4393 | int flags) |
0e8b6879 LC |
4394 | { |
4395 | struct inode *inode = file_inode(file); | |
4396 | handle_t *handle; | |
64395d95 | 4397 | int ret = 0, ret2 = 0, ret3 = 0; |
0e8b6879 | 4398 | int retries = 0; |
4134f5c8 | 4399 | int depth = 0; |
0e8b6879 LC |
4400 | struct ext4_map_blocks map; |
4401 | unsigned int credits; | |
c174e6d6 | 4402 | loff_t epos; |
0e8b6879 | 4403 | |
c3fe493c | 4404 | BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); |
0e8b6879 | 4405 | map.m_lblk = offset; |
c174e6d6 | 4406 | map.m_len = len; |
0e8b6879 LC |
4407 | /* |
4408 | * Don't normalize the request if it can fit in one extent so | |
4409 | * that it doesn't get unnecessarily split into multiple | |
4410 | * extents. | |
4411 | */ | |
556615dc | 4412 | if (len <= EXT_UNWRITTEN_MAX_LEN) |
0e8b6879 LC |
4413 | flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; |
4414 | ||
4415 | /* | |
4416 | * credits to insert 1 extent into extent tree | |
4417 | */ | |
4418 | credits = ext4_chunk_trans_blocks(inode, len); | |
c3fe493c | 4419 | depth = ext_depth(inode); |
0e8b6879 LC |
4420 | |
4421 | retry: | |
3258386a | 4422 | while (len) { |
4134f5c8 LC |
4423 | /* |
4424 | * Recalculate credits when extent tree depth changes. | |
4425 | */ | |
011c88e3 | 4426 | if (depth != ext_depth(inode)) { |
4134f5c8 LC |
4427 | credits = ext4_chunk_trans_blocks(inode, len); |
4428 | depth = ext_depth(inode); | |
4429 | } | |
4430 | ||
0e8b6879 LC |
4431 | handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, |
4432 | credits); | |
4433 | if (IS_ERR(handle)) { | |
4434 | ret = PTR_ERR(handle); | |
4435 | break; | |
4436 | } | |
4437 | ret = ext4_map_blocks(handle, inode, &map, flags); | |
4438 | if (ret <= 0) { | |
4439 | ext4_debug("inode #%lu: block %u: len %u: " | |
4440 | "ext4_ext_map_blocks returned %d", | |
4441 | inode->i_ino, map.m_lblk, | |
4442 | map.m_len, ret); | |
4443 | ext4_mark_inode_dirty(handle, inode); | |
3258386a | 4444 | ext4_journal_stop(handle); |
0e8b6879 LC |
4445 | break; |
4446 | } | |
3258386a EW |
4447 | /* |
4448 | * allow a full retry cycle for any remaining allocations | |
4449 | */ | |
4450 | retries = 0; | |
c174e6d6 DM |
4451 | map.m_lblk += ret; |
4452 | map.m_len = len = len - ret; | |
4453 | epos = (loff_t)map.m_lblk << inode->i_blkbits; | |
eeca7ea1 | 4454 | inode->i_ctime = current_time(inode); |
c174e6d6 DM |
4455 | if (new_size) { |
4456 | if (epos > new_size) | |
4457 | epos = new_size; | |
4458 | if (ext4_update_inode_size(inode, epos) & 0x1) | |
4459 | inode->i_mtime = inode->i_ctime; | |
c174e6d6 | 4460 | } |
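/*
 * epos is the byte offset just past the blocks mapped so far; when the
 * size is being extended (new_size != 0), i_size is advanced
 * incrementally, capped at new_size, so the on-disk size should never
 * run ahead of what has actually been allocated.
 */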
4209ae12 | 4461 | ret2 = ext4_mark_inode_dirty(handle, inode); |
c894aa97 | 4462 | ext4_update_inode_fsync_trans(handle, inode, 1); |
4209ae12 HS |
4463 | ret3 = ext4_journal_stop(handle); |
4464 | ret2 = ret3 ? ret3 : ret2; | |
4465 | if (unlikely(ret2)) | |
0e8b6879 LC |
4466 | break; |
4467 | } | |
3258386a | 4468 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
0e8b6879 | 4469 | goto retry; |
0e8b6879 LC |
4470 | |
4471 | return ret > 0 ? ret2 : ret; | |
4472 | } | |
4473 | ||
43f81677 EB |
4474 | static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); |
4475 | ||
4476 | static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len); | |
4477 | ||
b8a86845 LC |
4478 | static long ext4_zero_range(struct file *file, loff_t offset, |
4479 | loff_t len, int mode) | |
4480 | { | |
4481 | struct inode *inode = file_inode(file); | |
4482 | handle_t *handle = NULL; | |
4483 | unsigned int max_blocks; | |
4484 | loff_t new_size = 0; | |
4485 | int ret = 0; | |
4486 | int flags; | |
69dc9536 | 4487 | int credits; |
c174e6d6 | 4488 | int partial_begin, partial_end; |
b8a86845 LC |
4489 | loff_t start, end; |
4490 | ext4_lblk_t lblk; | |
b8a86845 LC |
4491 | unsigned int blkbits = inode->i_blkbits; |
4492 | ||
4493 | trace_ext4_zero_range(inode, offset, len, mode); | |
4494 | ||
e1ee60fd NJ |
4495 | /* Call ext4_force_commit to flush all data in case of data=journal. */ |
4496 | if (ext4_should_journal_data(inode)) { | |
4497 | ret = ext4_force_commit(inode->i_sb); | |
4498 | if (ret) | |
4499 | return ret; | |
4500 | } | |
4501 | ||
b8a86845 | 4502 | /* |
e4d7f2d3 | 4503 | * Round up the offset. Unlike plain fallocate, we need to zero out |
b8a86845 LC |
4504 | * blocks, so convert the interior block-aligned part of the range to |
4505 | * unwritten and possibly manually zero out the unaligned parts of the |
4506 | * range. |
4507 | */ | |
4508 | start = round_up(offset, 1 << blkbits); | |
4509 | end = round_down((offset + len), 1 << blkbits); | |
4510 | ||
4511 | if (start < offset || end > offset + len) | |
4512 | return -EINVAL; | |
c174e6d6 DM |
4513 | partial_begin = offset & ((1 << blkbits) - 1); |
4514 | partial_end = (offset + len) & ((1 << blkbits) - 1); | |
b8a86845 LC |
4515 | |
4516 | lblk = start >> blkbits; | |
4517 | max_blocks = (end >> blkbits); | |
4518 | if (max_blocks < lblk) | |
4519 | max_blocks = 0; | |
4520 | else | |
4521 | max_blocks -= lblk; | |
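/*
 * Illustrative example with a 4K block size: offset = 5000, len = 10000
 * gives start = 8192, end = 12288, partial_begin = 904, partial_end =
 * 2712, lblk = 2 and max_blocks = 1. Block 2 is converted to an
 * unwritten extent below, while the unaligned edges [5000, 8192) and
 * [12288, 15000) are zeroed out manually at the end.
 */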
4522 | ||
5955102c | 4523 | inode_lock(inode); |
b8a86845 LC |
4524 | |
4525 | /* | |
80dd4978 | 4526 | * Indirect files do not support unwritten extents |
b8a86845 LC |
4527 | */ |
4528 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { | |
4529 | ret = -EOPNOTSUPP; | |
4530 | goto out_mutex; | |
4531 | } | |
4532 | ||
4533 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | |
9b02e498 | 4534 | (offset + len > inode->i_size || |
51e3ae81 | 4535 | offset + len > EXT4_I(inode)->i_disksize)) { |
b8a86845 LC |
4536 | new_size = offset + len; |
4537 | ret = inode_newsize_ok(inode, new_size); | |
4538 | if (ret) | |
4539 | goto out_mutex; | |
b8a86845 LC |
4540 | } |
4541 | ||
0f2af21a | 4542 | flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; |
0f2af21a | 4543 | |
17048e8a | 4544 | /* Wait for all existing DIO workers; newcomers will block on i_mutex */ |
17048e8a JK |
4545 | inode_dio_wait(inode); |
4546 | ||
0f2af21a LC |
4547 | /* Preallocate the range including the unaligned edges */ |
4548 | if (partial_begin || partial_end) { | |
4549 | ret = ext4_alloc_file_blocks(file, | |
4550 | round_down(offset, 1 << blkbits) >> blkbits, | |
4551 | (round_up((offset + len), 1 << blkbits) - | |
4552 | round_down(offset, 1 << blkbits)) >> blkbits, | |
77a2e84d | 4553 | new_size, flags); |
0f2af21a | 4554 | if (ret) |
1d39834f | 4555 | goto out_mutex; |
0f2af21a LC |
4556 | |
4557 | } | |
4558 | ||
4559 | /* Zero range excluding the unaligned edges */ | |
b8a86845 | 4560 | if (max_blocks > 0) { |
0f2af21a LC |
4561 | flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | |
4562 | EXT4_EX_NOCACHE); | |
b8a86845 | 4563 | |
ea3d7209 JK |
4564 | /* |
4565 | * Prevent page faults from reinstantiating pages we have | |
4566 | * released from page cache. | |
4567 | */ | |
4568 | down_write(&EXT4_I(inode)->i_mmap_sem); | |
430657b6 RZ |
4569 | |
4570 | ret = ext4_break_layouts(inode); | |
4571 | if (ret) { | |
4572 | up_write(&EXT4_I(inode)->i_mmap_sem); | |
4573 | goto out_mutex; | |
4574 | } | |
4575 | ||
01127848 JK |
4576 | ret = ext4_update_disksize_before_punch(inode, offset, len); |
4577 | if (ret) { | |
4578 | up_write(&EXT4_I(inode)->i_mmap_sem); | |
1d39834f | 4579 | goto out_mutex; |
01127848 | 4580 | } |
ea3d7209 JK |
4581 | /* Now release the pages and zero block aligned part of pages */ |
4582 | truncate_pagecache_range(inode, start, end - 1); | |
eeca7ea1 | 4583 | inode->i_mtime = inode->i_ctime = current_time(inode); |
ea3d7209 | 4584 | |
713e8dde | 4585 | ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, |
77a2e84d | 4586 | flags); |
ea3d7209 | 4587 | up_write(&EXT4_I(inode)->i_mmap_sem); |
713e8dde | 4588 | if (ret) |
1d39834f | 4589 | goto out_mutex; |
b8a86845 | 4590 | } |
c174e6d6 | 4591 | if (!partial_begin && !partial_end) |
1d39834f | 4592 | goto out_mutex; |
c174e6d6 | 4593 | |
69dc9536 DM |
4594 | /* |
4595 | * In the worst case we have to write out two nonadjacent unwritten |
4596 | * blocks and update the inode | |
4597 | */ | |
4598 | credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; | |
4599 | if (ext4_should_journal_data(inode)) | |
4600 | credits += 2; | |
4601 | handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); | |
b8a86845 LC |
4602 | if (IS_ERR(handle)) { |
4603 | ret = PTR_ERR(handle); | |
4604 | ext4_std_error(inode->i_sb, ret); | |
1d39834f | 4605 | goto out_mutex; |
b8a86845 LC |
4606 | } |
4607 | ||
eeca7ea1 | 4608 | inode->i_mtime = inode->i_ctime = current_time(inode); |
4337ecd1 | 4609 | if (new_size) |
4631dbf6 | 4610 | ext4_update_inode_size(inode, new_size); |
4209ae12 HS |
4611 | ret = ext4_mark_inode_dirty(handle, inode); |
4612 | if (unlikely(ret)) | |
4613 | goto out_handle; | |
a80f7fcf | 4614 | ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits, |
aa75f4d3 | 4615 | (offset + len - 1) >> inode->i_sb->s_blocksize_bits); |
b8a86845 LC |
4616 | /* Zero out partial block at the edges of the range */ |
4617 | ret = ext4_zero_partial_blocks(handle, inode, offset, len); | |
67a7d5f5 JK |
4618 | if (ret >= 0) |
4619 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
b8a86845 LC |
4620 | |
4621 | if (file->f_flags & O_SYNC) | |
4622 | ext4_handle_sync(handle); | |
4623 | ||
4209ae12 | 4624 | out_handle: |
b8a86845 | 4625 | ext4_journal_stop(handle); |
b8a86845 | 4626 | out_mutex: |
5955102c | 4627 | inode_unlock(inode); |
b8a86845 LC |
4628 | return ret; |
4629 | } | |
4630 | ||
a2df2a63 | 4631 | /* |
2fe17c10 | 4632 | * Preallocate space for a file. This implements ext4's fallocate file |
a2df2a63 AA |
4633 | * operation, which gets called from the sys_fallocate system call. |
4634 | * For block-mapped files, posix_fallocate should fall back to the method |
4635 | * of writing zeroes to the required new blocks (the same behavior that is |
4636 | * expected from file systems which do not support the fallocate() system call). |
4637 | */ | |
2fe17c10 | 4638 | long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) |
a2df2a63 | 4639 | { |
496ad9aa | 4640 | struct inode *inode = file_inode(file); |
f282ac19 | 4641 | loff_t new_size = 0; |
498e5f24 | 4642 | unsigned int max_blocks; |
a2df2a63 | 4643 | int ret = 0; |
a4e5d88b | 4644 | int flags; |
0e8b6879 | 4645 | ext4_lblk_t lblk; |
0e8b6879 | 4646 | unsigned int blkbits = inode->i_blkbits; |
a2df2a63 | 4647 | |
2058f83a MH |
4648 | /* |
4649 | * Encrypted inodes can't handle collapse range or insert | |
4650 | * range since we would need to re-encrypt blocks with a | |
4651 | * different IV or XTS tweak (which are based on the logical | |
4652 | * block number). | |
2058f83a | 4653 | */ |
592ddec7 | 4654 | if (IS_ENCRYPTED(inode) && |
457b1e35 | 4655 | (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) |
2058f83a MH |
4656 | return -EOPNOTSUPP; |
4657 | ||
a4bb6b64 | 4658 | /* Return error if mode is not supported */ |
9eb79482 | 4659 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | |
331573fe NJ |
4660 | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | |
4661 | FALLOC_FL_INSERT_RANGE)) | |
a4bb6b64 AH |
4662 | return -EOPNOTSUPP; |
4663 | ||
aa75f4d3 HS |
4664 | ext4_fc_start_update(inode); |
4665 | ||
4666 | if (mode & FALLOC_FL_PUNCH_HOLE) { | |
4667 | ret = ext4_punch_hole(inode, offset, len); | |
4668 | goto exit; | |
4669 | } | |
a4bb6b64 | 4670 | |
0c8d414f TM |
4671 | ret = ext4_convert_inline_data(inode); |
4672 | if (ret) | |
aa75f4d3 | 4673 | goto exit; |
0c8d414f | 4674 | |
aa75f4d3 HS |
4675 | if (mode & FALLOC_FL_COLLAPSE_RANGE) { |
4676 | ret = ext4_collapse_range(inode, offset, len); | |
4677 | goto exit; | |
4678 | } | |
331573fe | 4679 | |
aa75f4d3 HS |
4680 | if (mode & FALLOC_FL_INSERT_RANGE) { |
4681 | ret = ext4_insert_range(inode, offset, len); | |
4682 | goto exit; | |
4683 | } | |
b8a86845 | 4684 | |
aa75f4d3 HS |
4685 | if (mode & FALLOC_FL_ZERO_RANGE) { |
4686 | ret = ext4_zero_range(file, offset, len, mode); | |
4687 | goto exit; | |
4688 | } | |
0562e0ba | 4689 | trace_ext4_fallocate_enter(inode, offset, len, mode); |
0e8b6879 | 4690 | lblk = offset >> blkbits; |
0e8b6879 | 4691 | |
518eaa63 | 4692 | max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); |
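/*
 * EXT4_MAX_BLOCKS() here counts the blocks touched by the byte range;
 * e.g. with 4K blocks, offset = 5000 and len = 10000 cover bytes
 * 5000..14999, i.e. blocks 1..3, so lblk = 1 and max_blocks = 3.
 */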
556615dc | 4693 | flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; |
0e8b6879 | 4694 | |
5955102c | 4695 | inode_lock(inode); |
f282ac19 | 4696 | |
280227a7 DI |
4697 | /* |
4698 | * We only support preallocation for extent-based files |
4699 | */ | |
4700 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { | |
4701 | ret = -EOPNOTSUPP; | |
4702 | goto out; | |
4703 | } | |
4704 | ||
f282ac19 | 4705 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
9b02e498 | 4706 | (offset + len > inode->i_size || |
51e3ae81 | 4707 | offset + len > EXT4_I(inode)->i_disksize)) { |
f282ac19 LC |
4708 | new_size = offset + len; |
4709 | ret = inode_newsize_ok(inode, new_size); | |
4710 | if (ret) | |
4711 | goto out; | |
6d19c42b | 4712 | } |
f282ac19 | 4713 | |
17048e8a | 4714 | /* Wait for all existing DIO workers; newcomers will block on i_mutex */ |
17048e8a JK |
4715 | inode_dio_wait(inode); |
4716 | ||
77a2e84d | 4717 | ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); |
0e8b6879 LC |
4718 | if (ret) |
4719 | goto out; | |
f282ac19 | 4720 | |
c174e6d6 | 4721 | if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { |
aa75f4d3 HS |
4722 | ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, |
4723 | EXT4_I(inode)->i_sync_tid); | |
f282ac19 | 4724 | } |
f282ac19 | 4725 | out: |
5955102c | 4726 | inode_unlock(inode); |
0e8b6879 | 4727 | trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); |
aa75f4d3 HS |
4728 | exit: |
4729 | ext4_fc_stop_update(inode); | |
0e8b6879 | 4730 | return ret; |
a2df2a63 | 4731 | } |
6873fa0d | 4732 | |
0031462b MC |
4733 | /* |
4734 | * This function converts a range of blocks to written extents. |
4735 | * The caller of this function will pass the start offset and the size. |
4736 | * All unwritten extents within this range will be converted to |
4737 | * written extents. |
4738 | * |
4739 | * This function is called from the direct I/O end-io callback |
4740 | * function, to convert the fallocated extents after I/O is completed. |
109f5565 | 4741 | * Returns 0 on success. |
0031462b | 4742 | */ |
6b523df4 JK |
4743 | int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, |
4744 | loff_t offset, ssize_t len) | |
0031462b | 4745 | { |
0031462b | 4746 | unsigned int max_blocks; |
4209ae12 | 4747 | int ret = 0, ret2 = 0, ret3 = 0; |
2ed88685 | 4748 | struct ext4_map_blocks map; |
a00713ea RH |
4749 | unsigned int blkbits = inode->i_blkbits; |
4750 | unsigned int credits = 0; | |
0031462b | 4751 | |
2ed88685 | 4752 | map.m_lblk = offset >> blkbits; |
518eaa63 FF |
4753 | max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); |
4754 | ||
a00713ea | 4755 | if (!handle) { |
6b523df4 JK |
4756 | /* |
4757 | * credits to insert 1 extent into extent tree | |
4758 | */ | |
4759 | credits = ext4_chunk_trans_blocks(inode, max_blocks); | |
4760 | } | |
0031462b | 4761 | while (ret >= 0 && ret < max_blocks) { |
2ed88685 TT |
4762 | map.m_lblk += ret; |
4763 | map.m_len = (max_blocks -= ret); | |
6b523df4 JK |
4764 | if (credits) { |
4765 | handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, | |
4766 | credits); | |
4767 | if (IS_ERR(handle)) { | |
4768 | ret = PTR_ERR(handle); | |
4769 | break; | |
4770 | } | |
0031462b | 4771 | } |
2ed88685 | 4772 | ret = ext4_map_blocks(handle, inode, &map, |
c7064ef1 | 4773 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); |
b06acd38 LC |
4774 | if (ret <= 0) |
4775 | ext4_warning(inode->i_sb, | |
4776 | "inode #%lu: block %u: len %u: " | |
4777 | "ext4_ext_map_blocks returned %d", | |
4778 | inode->i_ino, map.m_lblk, | |
4779 | map.m_len, ret); | |
4209ae12 HS |
4780 | ret2 = ext4_mark_inode_dirty(handle, inode); |
4781 | if (credits) { | |
4782 | ret3 = ext4_journal_stop(handle); | |
4783 | if (unlikely(ret3)) | |
4784 | ret2 = ret3; | |
4785 | } | |
4786 | ||
6b523df4 | 4787 | if (ret <= 0 || ret2) |
0031462b MC |
4788 | break; |
4789 | } | |
4790 | return ret > 0 ? ret2 : ret; | |
4791 | } | |
6d9c85eb | 4792 | |
a00713ea RH |
4793 | int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) |
4794 | { | |
d1e18b88 | 4795 | int ret = 0, err = 0; |
c8cc8816 | 4796 | struct ext4_io_end_vec *io_end_vec; |
a00713ea RH |
4797 | |
4798 | /* | |
4799 | * This is somewhat ugly but the idea is clear: when a transaction is |
4800 | * reserved, everything goes into it. Otherwise we'd rather start several |
4801 | * smaller transactions, converting each extent separately. |
4802 | */ | |
4803 | if (handle) { | |
4804 | handle = ext4_journal_start_reserved(handle, | |
4805 | EXT4_HT_EXT_CONVERT); | |
4806 | if (IS_ERR(handle)) | |
4807 | return PTR_ERR(handle); | |
4808 | } | |
4809 | ||
c8cc8816 RH |
4810 | list_for_each_entry(io_end_vec, &io_end->list_vec, list) { |
4811 | ret = ext4_convert_unwritten_extents(handle, io_end->inode, | |
4812 | io_end_vec->offset, | |
4813 | io_end_vec->size); | |
4814 | if (ret) | |
4815 | break; | |
4816 | } | |
4817 | ||
a00713ea RH |
4818 | if (handle) |
4819 | err = ext4_journal_stop(handle); | |
4820 | ||
4821 | return ret < 0 ? ret : err; | |
4822 | } | |
4823 | ||
d3b6f23f | 4824 | static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap) |
6873fa0d ES |
4825 | { |
4826 | __u64 physical = 0; | |
d3b6f23f | 4827 | __u64 length = 0; |
6873fa0d ES |
4828 | int blockbits = inode->i_sb->s_blocksize_bits; |
4829 | int error = 0; | |
d3b6f23f | 4830 | u16 iomap_type; |
6873fa0d ES |
4831 | |
4832 | /* in-inode? */ | |
19f5fb7a | 4833 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
6873fa0d ES |
4834 | struct ext4_iloc iloc; |
4835 | int offset; /* offset of xattr in inode */ | |
4836 | ||
4837 | error = ext4_get_inode_loc(inode, &iloc); | |
4838 | if (error) | |
4839 | return error; | |
a60697f4 | 4840 | physical = (__u64)iloc.bh->b_blocknr << blockbits; |
6873fa0d ES |
4841 | offset = EXT4_GOOD_OLD_INODE_SIZE + |
4842 | EXT4_I(inode)->i_extra_isize; | |
4843 | physical += offset; | |
4844 | length = EXT4_SB(inode->i_sb)->s_inode_size - offset; | |
fd2dd9fb | 4845 | brelse(iloc.bh); |
d3b6f23f RH |
4846 | iomap_type = IOMAP_INLINE; |
4847 | } else if (EXT4_I(inode)->i_file_acl) { /* external block */ | |
a60697f4 | 4848 | physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; |
6873fa0d | 4849 | length = inode->i_sb->s_blocksize; |
d3b6f23f RH |
4850 | iomap_type = IOMAP_MAPPED; |
4851 | } else { | |
4852 | /* no in-inode or external block for xattr, so return -ENOENT */ | |
4853 | error = -ENOENT; | |
4854 | goto out; | |
6873fa0d ES |
4855 | } |
4856 | ||
d3b6f23f RH |
4857 | iomap->addr = physical; |
4858 | iomap->offset = 0; | |
4859 | iomap->length = length; | |
4860 | iomap->type = iomap_type; | |
4861 | iomap->flags = 0; | |
4862 | out: | |
4863 | return error; | |
6873fa0d ES |
4864 | } |
4865 | ||
d3b6f23f RH |
4866 | static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, |
4867 | loff_t length, unsigned flags, | |
4868 | struct iomap *iomap, struct iomap *srcmap) | |
6873fa0d | 4869 | { |
d3b6f23f | 4870 | int error; |
bb5835ed | 4871 | |
d3b6f23f RH |
4872 | error = ext4_iomap_xattr_fiemap(inode, iomap); |
4873 | if (error == 0 && (offset >= iomap->length)) | |
4874 | error = -ENOENT; | |
4875 | return error; | |
4876 | } | |
94191985 | 4877 | |
d3b6f23f RH |
4878 | static const struct iomap_ops ext4_iomap_xattr_ops = { |
4879 | .iomap_begin = ext4_iomap_xattr_begin, | |
4880 | }; | |
94191985 | 4881 | |
328e24ae CH |
4882 | static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) |
4883 | { | |
4884 | u64 maxbytes; | |
4885 | ||
4886 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | |
4887 | maxbytes = inode->i_sb->s_maxbytes; | |
4888 | else | |
4889 | maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; | |
4890 | ||
4891 | if (*len == 0) | |
4892 | return -EINVAL; | |
4893 | if (start > maxbytes) | |
4894 | return -EFBIG; | |
4895 | ||
4896 | /* | |
4897 | * Shrink request scope to what the fs can actually handle. | |
4898 | */ | |
4899 | if (*len > maxbytes || (maxbytes - *len) < start) | |
4900 | *len = maxbytes - start; | |
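/*
 * e.g. with maxbytes = 16 GiB, start = 15 GiB and *len = 4 GiB, the
 * request is clamped to the 1 GiB that remains below the limit.
 */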
4901 | return 0; | |
4902 | } | |
4903 | ||
03a5ed24 CH |
4904 | int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
4905 | u64 start, u64 len) | |
d3b6f23f | 4906 | { |
d3b6f23f | 4907 | int error = 0; |
94191985 | 4908 | |
7869a4a6 TT |
4909 | if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { |
4910 | error = ext4_ext_precache(inode); | |
4911 | if (error) | |
4912 | return error; | |
bb5835ed | 4913 | fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; |
7869a4a6 TT |
4914 | } |
4915 | ||
328e24ae CH |
4916 | /* |
4917 | * For block-mapped (non-extent) files the maximum size limit could be smaller than |
4918 | * s_maxbytes, so check len here manually instead of just relying on the | |
4919 | * generic check. | |
4920 | */ | |
4921 | error = ext4_fiemap_check_ranges(inode, start, &len); | |
4922 | if (error) | |
4923 | return error; | |
4924 | ||
6873fa0d | 4925 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { |
d3b6f23f | 4926 | fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; |
03a5ed24 CH |
4927 | return iomap_fiemap(inode, fieinfo, start, len, |
4928 | &ext4_iomap_xattr_ops); | |
6873fa0d | 4929 | } |
9eb79482 | 4930 | |
03a5ed24 | 4931 | return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops); |
bb5835ed TT |
4932 | } |
4933 | ||
4934 | int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, | |
4935 | __u64 start, __u64 len) | |
4936 | { | |
03a5ed24 CH |
4937 | ext4_lblk_t start_blk, len_blks; |
4938 | __u64 last_blk; | |
4939 | int error = 0; | |
4940 | ||
bb5835ed TT |
4941 | if (ext4_has_inline_data(inode)) { |
4942 | int has_inline; | |
4943 | ||
4944 | down_read(&EXT4_I(inode)->xattr_sem); | |
4945 | has_inline = ext4_has_inline_data(inode); | |
4946 | up_read(&EXT4_I(inode)->xattr_sem); | |
4947 | if (has_inline) | |
4948 | return 0; | |
4949 | } | |
4950 | ||
03a5ed24 CH |
4951 | if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { |
4952 | error = ext4_ext_precache(inode); | |
4953 | if (error) | |
4954 | return error; | |
4955 | fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; | |
4956 | } | |
4957 | ||
45dd052e | 4958 | error = fiemap_prep(inode, fieinfo, start, &len, 0); |
cddf8a2c CH |
4959 | if (error) |
4960 | return error; | |
bb5835ed | 4961 | |
03a5ed24 CH |
4962 | error = ext4_fiemap_check_ranges(inode, start, &len); |
4963 | if (error) | |
4964 | return error; | |
4965 | ||
4966 | start_blk = start >> inode->i_sb->s_blocksize_bits; | |
4967 | last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; | |
4968 | if (last_blk >= EXT_MAX_BLOCKS) | |
4969 | last_blk = EXT_MAX_BLOCKS-1; | |
4970 | len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; | |
4971 | ||
4972 | /* | |
4973 | * Walk the extent tree gathering extent information | |
4974 | * and pushing extents back to the user. | |
4975 | */ | |
4976 | return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo); | |
4977 | } | |
bb5835ed | 4978 | |
9eb79482 NJ |
4979 | /* |
4980 | * ext4_access_path: | |
4981 | * Function to access the path buffer for marking it dirty. | |
4982 | * It also checks if there are sufficient credits left in the journal handle | |
4983 | * to update the path. |
4984 | */ | |
4985 | static int | |
4986 | ext4_access_path(handle_t *handle, struct inode *inode, | |
4987 | struct ext4_ext_path *path) | |
4988 | { | |
4989 | int credits, err; | |
4990 | ||
4991 | if (!ext4_handle_valid(handle)) | |
4992 | return 0; | |
4993 | ||
4994 | /* | |
4995 | * Check if we need to extend the journal credits: |
4996 | * 3 for leaf, sb, and inode, plus 2 (bitmap and group |
4997 | * descriptor) for each block group; assume two block |
4998 | * groups |
4999 | */ | |
a4130367 | 5000 | credits = ext4_writepage_trans_blocks(inode); |
83448bdf | 5001 | err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0); |
a4130367 JK |
5002 | if (err < 0) |
5003 | return err; | |
9eb79482 NJ |
5004 | |
5005 | err = ext4_ext_get_access(handle, inode, path); | |
5006 | return err; | |
5007 | } | |
5008 | ||
5009 | /* | |
5010 | * ext4_ext_shift_path_extents: | |
5011 | * Shift the extents of a path structure lying between path[depth].p_ext | |
331573fe NJ |
5012 | * and EXT_LAST_EXTENT(path[depth].p_hdr) by @shift blocks. @SHIFT tells |
5013 | * whether it is a right-shift or a left-shift operation. |
9eb79482 NJ |
5014 | */ |
5015 | static int | |
5016 | ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, | |
5017 | struct inode *inode, handle_t *handle, | |
331573fe | 5018 | enum SHIFT_DIRECTION SHIFT) |
9eb79482 NJ |
5019 | { |
5020 | int depth, err = 0; | |
5021 | struct ext4_extent *ex_start, *ex_last; | |
4756ee18 | 5022 | bool update = false; |
9eb79482 NJ |
5023 | depth = path->p_depth; |
5024 | ||
5025 | while (depth >= 0) { | |
5026 | if (depth == path->p_depth) { | |
5027 | ex_start = path[depth].p_ext; | |
5028 | if (!ex_start) | |
6a797d27 | 5029 | return -EFSCORRUPTED; |
9eb79482 NJ |
5030 | |
5031 | ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); | |
9eb79482 NJ |
5032 | |
5033 | err = ext4_access_path(handle, inode, path + depth); | |
5034 | if (err) | |
5035 | goto out; | |
5036 | ||
5037 | if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) | |
4756ee18 | 5038 | update = true; |
9eb79482 | 5039 | |
9eb79482 | 5040 | while (ex_start <= ex_last) { |
331573fe NJ |
5041 | if (SHIFT == SHIFT_LEFT) { |
5042 | le32_add_cpu(&ex_start->ee_block, | |
5043 | -shift); | |
5044 | /* Try to merge to the left. */ | |
5045 | if ((ex_start > | |
5046 | EXT_FIRST_EXTENT(path[depth].p_hdr)) | |
5047 | && | |
5048 | ext4_ext_try_to_merge_right(inode, | |
5049 | path, ex_start - 1)) | |
5050 | ex_last--; | |
5051 | else | |
5052 | ex_start++; | |
5053 | } else { | |
5054 | le32_add_cpu(&ex_last->ee_block, shift); | |
5055 | ext4_ext_try_to_merge_right(inode, path, | |
5056 | ex_last); | |
6dd834ef | 5057 | ex_last--; |
331573fe | 5058 | } |
9eb79482 NJ |
5059 | } |
5060 | err = ext4_ext_dirty(handle, inode, path + depth); | |
5061 | if (err) | |
5062 | goto out; | |
5063 | ||
5064 | if (--depth < 0 || !update) | |
5065 | break; | |
5066 | } | |
5067 | ||
5068 | /* Update index too */ | |
5069 | err = ext4_access_path(handle, inode, path + depth); | |
5070 | if (err) | |
5071 | goto out; | |
5072 | ||
331573fe NJ |
5073 | if (SHIFT == SHIFT_LEFT) |
5074 | le32_add_cpu(&path[depth].p_idx->ei_block, -shift); | |
5075 | else | |
5076 | le32_add_cpu(&path[depth].p_idx->ei_block, shift); | |
9eb79482 NJ |
5077 | err = ext4_ext_dirty(handle, inode, path + depth); |
5078 | if (err) | |
5079 | goto out; | |
5080 | ||
5081 | /* we are done if current index is not a starting index */ | |
5082 | if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) | |
5083 | break; | |
5084 | ||
5085 | depth--; | |
5086 | } | |
5087 | ||
5088 | out: | |
5089 | return err; | |
5090 | } | |
5091 | ||
5092 | /* | |
5093 | * ext4_ext_shift_extents: | |
331573fe NJ |
5094 | * All the extents which lie in the range from @start to the last allocated |
5095 | * block of @inode are shifted either left or right (depending |
5096 | * on @SHIFT) by @shift blocks. |
9eb79482 NJ |
5097 | * On success, 0 is returned, error otherwise. |
5098 | */ | |
5099 | static int | |
5100 | ext4_ext_shift_extents(struct inode *inode, handle_t *handle, | |
331573fe NJ |
5101 | ext4_lblk_t start, ext4_lblk_t shift, |
5102 | enum SHIFT_DIRECTION SHIFT) | |
9eb79482 NJ |
5103 | { |
5104 | struct ext4_ext_path *path; | |
5105 | int ret = 0, depth; | |
5106 | struct ext4_extent *extent; | |
331573fe | 5107 | ext4_lblk_t stop, *iterator, ex_start, ex_end; |
9eb79482 NJ |
5108 | |
5109 | /* Let path point to the last extent */ | |
03e916fa RP |
5110 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, |
5111 | EXT4_EX_NOCACHE); | |
9eb79482 NJ |
5112 | if (IS_ERR(path)) |
5113 | return PTR_ERR(path); | |
5114 | ||
5115 | depth = path->p_depth; | |
5116 | extent = path[depth].p_ext; | |
ee4bd0d9 TT |
5117 | if (!extent) |
5118 | goto out; | |
9eb79482 | 5119 | |
2a9b8cba | 5120 | stop = le32_to_cpu(extent->ee_block); |
9eb79482 | 5121 | |
331573fe | 5122 | /* |
349fa7d6 EB |
5123 | * For left shifts, make sure the hole on the left is big enough to |
5124 | * accommodate the shift. For right shifts, make sure the last extent | |
5125 | * won't be shifted beyond EXT_MAX_BLOCKS. | |
331573fe NJ |
5126 | */ |
5127 | if (SHIFT == SHIFT_LEFT) { | |
03e916fa RP |
5128 | path = ext4_find_extent(inode, start - 1, &path, |
5129 | EXT4_EX_NOCACHE); | |
331573fe NJ |
5130 | if (IS_ERR(path)) |
5131 | return PTR_ERR(path); | |
5132 | depth = path->p_depth; | |
5133 | extent = path[depth].p_ext; | |
5134 | if (extent) { | |
5135 | ex_start = le32_to_cpu(extent->ee_block); | |
5136 | ex_end = le32_to_cpu(extent->ee_block) + | |
5137 | ext4_ext_get_actual_len(extent); | |
5138 | } else { | |
5139 | ex_start = 0; | |
5140 | ex_end = 0; | |
5141 | } | |
9eb79482 | 5142 | |
331573fe NJ |
5143 | if ((start == ex_start && shift > ex_start) || |
5144 | (shift > start - ex_end)) { | |
349fa7d6 EB |
5145 | ret = -EINVAL; |
5146 | goto out; | |
5147 | } | |
5148 | } else { | |
5149 | if (shift > EXT_MAX_BLOCKS - | |
5150 | (stop + ext4_ext_get_actual_len(extent))) { | |
5151 | ret = -EINVAL; | |
5152 | goto out; | |
331573fe | 5153 | } |
8dc79ec4 | 5154 | } |
9eb79482 | 5155 | |
331573fe NJ |
5156 | /* |
5157 | * For a left shift, the iterator points to start and is increased |
5158 | * until we reach stop. For a right shift, the iterator points to stop |
5159 | * and is decreased until we reach start. |
5160 | */ | |
5161 | if (SHIFT == SHIFT_LEFT) | |
5162 | iterator = &start; | |
5163 | else | |
5164 | iterator = &stop; | |
9eb79482 | 5165 | |
2a9b8cba RP |
5166 | /* |
5167 | * It's safe to start updating extents. Start and stop are unsigned, so |
5168 | * for a right shift, if an extent with block 0 is reached, the iterator |
5169 | * becomes NULL to indicate the end of the loop. |
5170 | */ | |
5171 | while (iterator && start <= stop) { | |
03e916fa RP |
5172 | path = ext4_find_extent(inode, *iterator, &path, |
5173 | EXT4_EX_NOCACHE); | |
9eb79482 NJ |
5174 | if (IS_ERR(path)) |
5175 | return PTR_ERR(path); | |
5176 | depth = path->p_depth; | |
5177 | extent = path[depth].p_ext; | |
a18ed359 DM |
5178 | if (!extent) { |
5179 | EXT4_ERROR_INODE(inode, "unexpected hole at %lu", | |
331573fe | 5180 | (unsigned long) *iterator); |
6a797d27 | 5181 | return -EFSCORRUPTED; |
a18ed359 | 5182 | } |
331573fe NJ |
5183 | if (SHIFT == SHIFT_LEFT && *iterator > |
5184 | le32_to_cpu(extent->ee_block)) { | |
9eb79482 | 5185 | /* Hole, move to the next extent */ |
f8fb4f41 DM |
5186 | if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { |
5187 | path[depth].p_ext++; | |
5188 | } else { | |
331573fe | 5189 | *iterator = ext4_ext_next_allocated_block(path); |
f8fb4f41 | 5190 | continue; |
9eb79482 NJ |
5191 | } |
5192 | } | |
331573fe NJ |
5193 | |
5194 | if (SHIFT == SHIFT_LEFT) { | |
5195 | extent = EXT_LAST_EXTENT(path[depth].p_hdr); | |
5196 | *iterator = le32_to_cpu(extent->ee_block) + | |
5197 | ext4_ext_get_actual_len(extent); | |
5198 | } else { | |
5199 | extent = EXT_FIRST_EXTENT(path[depth].p_hdr); | |
2a9b8cba RP |
5200 | if (le32_to_cpu(extent->ee_block) > 0) |
5201 | *iterator = le32_to_cpu(extent->ee_block) - 1; | |
5202 | else | |
5203 | /* Beginning is reached, end of the loop */ | |
5204 | iterator = NULL; | |
331573fe NJ |
5205 | /* Update path extent in case we need to stop */ |
5206 | while (le32_to_cpu(extent->ee_block) < start) | |
5207 | extent++; | |
5208 | path[depth].p_ext = extent; | |
5209 | } | |
9eb79482 | 5210 | ret = ext4_ext_shift_path_extents(path, shift, inode, |
331573fe | 5211 | handle, SHIFT); |
9eb79482 NJ |
5212 | if (ret) |
5213 | break; | |
5214 | } | |
ee4bd0d9 TT |
5215 | out: |
5216 | ext4_ext_drop_refs(path); | |
5217 | kfree(path); | |
9eb79482 NJ |
5218 | return ret; |
5219 | } | |
5220 | ||
5221 | /* | |
5222 | * ext4_collapse_range: | |
5223 | * This implements fallocate's collapse-range functionality for ext4. |
5224 | * Returns: 0 on success and non-zero on error. |
5225 | */ | |
43f81677 | 5226 | static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) |
9eb79482 NJ |
5227 | { |
5228 | struct super_block *sb = inode->i_sb; | |
5229 | ext4_lblk_t punch_start, punch_stop; | |
5230 | handle_t *handle; | |
5231 | unsigned int credits; | |
a8680e0d | 5232 | loff_t new_size, ioffset; |
9eb79482 NJ |
5233 | int ret; |
5234 | ||
b9576fc3 TT |
5235 | /* |
5236 | * We need to test this early because xfstests assumes that a | |
5237 | * collapse range of (0, 1) will return EOPNOTSUPP if the file | |
5238 | * system does not support collapse range. | |
5239 | */ | |
5240 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | |
5241 | return -EOPNOTSUPP; | |
5242 | ||
9b02e498 EB |
5243 | /* Collapse range works only on fs cluster size aligned regions. */ |
5244 | if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) | |
9eb79482 NJ |
5245 | return -EINVAL; |
5246 | ||
9eb79482 NJ |
5247 | trace_ext4_collapse_range(inode, offset, len); |
5248 | ||
5249 | punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); | |
5250 | punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); | |
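/*
 * Illustrative example with 4K blocks (and a 1-block cluster): a
 * collapse of offset = 8192, len = 4096 gives punch_start = 2 and
 * punch_stop = 3; block 2 is removed, every extent from block 3
 * onwards is shifted left by one block, and i_size shrinks by len.
 */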
5251 | ||
1ce01c4a NJ |
5252 | /* Call ext4_force_commit to flush all data in case of data=journal. */ |
5253 | if (ext4_should_journal_data(inode)) { | |
5254 | ret = ext4_force_commit(inode->i_sb); | |
5255 | if (ret) | |
5256 | return ret; | |
5257 | } | |
5258 | ||
5955102c | 5259 | inode_lock(inode); |
23fffa92 LC |
5260 | /* |
5261 | * A collapse range that overlaps EOF is effectively a truncate |
5262 | * operation, so there is no need to support it here |
5263 | */ | |
9b02e498 | 5264 | if (offset + len >= inode->i_size) { |
23fffa92 LC |
5265 | ret = -EINVAL; |
5266 | goto out_mutex; | |
5267 | } | |
5268 | ||
9eb79482 NJ |
5269 | /* Currently just for extent based files */ |
5270 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { | |
5271 | ret = -EOPNOTSUPP; | |
5272 | goto out_mutex; | |
5273 | } | |
5274 | ||
9eb79482 | 5275 | /* Wait for existing dio to complete */ |
9eb79482 NJ |
5276 | inode_dio_wait(inode); |
5277 | ||
ea3d7209 JK |
5278 | /* |
5279 | * Prevent page faults from reinstantiating pages we have released from | |
5280 | * page cache. | |
5281 | */ | |
5282 | down_write(&EXT4_I(inode)->i_mmap_sem); | |
430657b6 RZ |
5283 | |
5284 | ret = ext4_break_layouts(inode); | |
5285 | if (ret) | |
5286 | goto out_mmap; | |
5287 | ||
32ebffd3 JK |
5288 | /* |
5289 | * Need to round down offset to be aligned with page size boundary | |
5290 | * for page size > block size. | |
5291 | */ | |
5292 | ioffset = round_down(offset, PAGE_SIZE); | |
5293 | /* | |
5294 | * Write tail of the last page before removed range since it will get | |
5295 | * removed from the page cache below. | |
5296 | */ | |
5297 | ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset); | |
5298 | if (ret) | |
5299 | goto out_mmap; | |
5300 | /* | |
5301 | * Write data that will be shifted to preserve them when discarding | |
5302 | * page cache below. We are also protected from pages becoming dirty | |
5303 | * by i_mmap_sem. | |
5304 | */ | |
5305 | ret = filemap_write_and_wait_range(inode->i_mapping, offset + len, | |
5306 | LLONG_MAX); | |
5307 | if (ret) | |
5308 | goto out_mmap; | |
ea3d7209 JK |
5309 | truncate_pagecache(inode, ioffset); |
5310 | ||
9eb79482 NJ |
5311 | credits = ext4_writepage_trans_blocks(inode); |
5312 | handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); | |
5313 | if (IS_ERR(handle)) { | |
5314 | ret = PTR_ERR(handle); | |
ea3d7209 | 5315 | goto out_mmap; |
9eb79482 | 5316 | } |
aa75f4d3 | 5317 | ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); |
9eb79482 NJ |
5318 | |
5319 | down_write(&EXT4_I(inode)->i_data_sem); | |
27bc446e | 5320 | ext4_discard_preallocations(inode, 0); |
9eb79482 NJ |
5321 | |
5322 | ret = ext4_es_remove_extent(inode, punch_start, | |
2c1d2328 | 5323 | EXT_MAX_BLOCKS - punch_start); |
9eb79482 NJ |
5324 | if (ret) { |
5325 | up_write(&EXT4_I(inode)->i_data_sem); | |
5326 | goto out_stop; | |
5327 | } | |
5328 | ||
5329 | ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); | |
5330 | if (ret) { | |
5331 | up_write(&EXT4_I(inode)->i_data_sem); | |
5332 | goto out_stop; | |
5333 | } | |
27bc446e | 5334 | ext4_discard_preallocations(inode, 0); |
9eb79482 NJ |
5335 | |
5336 | ret = ext4_ext_shift_extents(inode, handle, punch_stop, | |
331573fe | 5337 | punch_stop - punch_start, SHIFT_LEFT); |
9eb79482 NJ |
5338 | if (ret) { |
5339 | up_write(&EXT4_I(inode)->i_data_sem); | |
5340 | goto out_stop; | |
5341 | } | |
5342 | ||
9b02e498 | 5343 | new_size = inode->i_size - len; |
9337d5d3 | 5344 | i_size_write(inode, new_size); |
9eb79482 NJ |
5345 | EXT4_I(inode)->i_disksize = new_size; |
5346 | ||
9eb79482 NJ |
5347 | up_write(&EXT4_I(inode)->i_data_sem); |
5348 | if (IS_SYNC(inode)) | |
5349 | ext4_handle_sync(handle); | |
eeca7ea1 | 5350 | inode->i_mtime = inode->i_ctime = current_time(inode); |
4209ae12 | 5351 | ret = ext4_mark_inode_dirty(handle, inode); |
67a7d5f5 | 5352 | ext4_update_inode_fsync_trans(handle, inode, 1); |
9eb79482 NJ |
5353 | |
5354 | out_stop: | |
5355 | ext4_journal_stop(handle); | |
aa75f4d3 | 5356 | ext4_fc_stop_ineligible(sb); |
ea3d7209 JK |
5357 | out_mmap: |
5358 | up_write(&EXT4_I(inode)->i_mmap_sem); | |
9eb79482 | 5359 | out_mutex: |
5955102c | 5360 | inode_unlock(inode); |
9eb79482 NJ |
5361 | return ret; |
5362 | } | |
fcf6b1b7 | 5363 | |
331573fe NJ |
5364 | /* |
5365 | * ext4_insert_range: | |
5366 | * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. | |
5367 | * The data blocks starting from @offset to the EOF are shifted by @len | |
5368 | * towards right to create a hole in the @inode. Inode size is increased | |
5369 | * by len bytes. | |
5370 | * Returns 0 on success, error otherwise. | |
5371 | */ | |
43f81677 | 5372 | static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) |
331573fe NJ |
5373 | { |
5374 | struct super_block *sb = inode->i_sb; | |
5375 | handle_t *handle; | |
5376 | struct ext4_ext_path *path; | |
5377 | struct ext4_extent *extent; | |
5378 | ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; | |
5379 | unsigned int credits, ee_len; | |
5380 | int ret = 0, depth, split_flag = 0; | |
5381 | loff_t ioffset; | |
5382 | ||
5383 | /* | |
5384 | * We need to test this early because xfstests assumes that an | |
5385 | * insert range of (0, 1) will return EOPNOTSUPP if the file | |
5386 | * system does not support insert range. | |
5387 | */ | |
5388 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | |
5389 | return -EOPNOTSUPP; | |
5390 | ||
9b02e498 EB |
5391 | /* Insert range works only on fs cluster size aligned regions. */ |
5392 | if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) | |
331573fe NJ |
5393 | return -EINVAL; |
5394 | ||
331573fe NJ |
5395 | trace_ext4_insert_range(inode, offset, len); |
5396 | ||
5397 | offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); | |
5398 | len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); | |
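/*
 * Illustrative example with 4K blocks (and a 1-block cluster): an
 * insert of offset = 8192, len = 4096 gives offset_lblk = 2 and
 * len_lblk = 1; extents at and beyond block 2 are shifted right by one
 * block, and both i_size and i_disksize grow by len.
 */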
5399 | ||
5400 | /* Call ext4_force_commit to flush all data in case of data=journal */ | |
5401 | if (ext4_should_journal_data(inode)) { | |
5402 | ret = ext4_force_commit(inode->i_sb); | |
5403 | if (ret) | |
5404 | return ret; | |
5405 | } | |
5406 | ||
5955102c | 5407 | inode_lock(inode); |
331573fe NJ |
5408 | /* Currently just for extent based files */ |
5409 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { | |
5410 | ret = -EOPNOTSUPP; | |
5411 | goto out_mutex; | |
5412 | } | |
5413 | ||
9b02e498 EB |
5414 | /* Check whether the maximum file size would be exceeded */ |
5415 | if (len > inode->i_sb->s_maxbytes - inode->i_size) { | |
331573fe NJ |
5416 | ret = -EFBIG; |
5417 | goto out_mutex; | |
5418 | } | |
5419 | ||
9b02e498 EB |
5420 | /* Offset must be less than i_size */ |
5421 | if (offset >= inode->i_size) { | |
331573fe NJ |
5422 | ret = -EINVAL; |
5423 | goto out_mutex; | |
5424 | } | |
5425 | ||
331573fe | 5426 | /* Wait for existing dio to complete */ |
331573fe NJ |
5427 | inode_dio_wait(inode); |
5428 | ||
ea3d7209 JK |
5429 | /* |
5430 | * Prevent page faults from reinstantiating pages we have released from | |
5431 | * page cache. | |
5432 | */ | |
5433 | down_write(&EXT4_I(inode)->i_mmap_sem); | |
430657b6 RZ |
5434 | |
5435 | ret = ext4_break_layouts(inode); | |
5436 | if (ret) | |
5437 | goto out_mmap; | |
5438 | ||
32ebffd3 JK |
5439 | /* |
5440 | * Need to round down to align start offset to page size boundary | |
5441 | * for page size > block size. | |
5442 | */ | |
5443 | ioffset = round_down(offset, PAGE_SIZE); | |
5444 | /* Write out all dirty pages */ | |
5445 | ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, | |
5446 | LLONG_MAX); | |
5447 | if (ret) | |
5448 | goto out_mmap; | |
ea3d7209 JK |
5449 | truncate_pagecache(inode, ioffset); |
5450 | ||
331573fe NJ |
5451 | credits = ext4_writepage_trans_blocks(inode); |
5452 | handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); | |
5453 | if (IS_ERR(handle)) { | |
5454 | ret = PTR_ERR(handle); | |
ea3d7209 | 5455 | goto out_mmap; |
331573fe | 5456 | } |
aa75f4d3 | 5457 | ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); |
331573fe NJ |
5458 | |
5459 | /* Expand the file to avoid data loss if there is an error while shifting */ |
5460 | inode->i_size += len; | |
5461 | EXT4_I(inode)->i_disksize += len; | |
eeca7ea1 | 5462 | inode->i_mtime = inode->i_ctime = current_time(inode); |
331573fe NJ |
5463 | ret = ext4_mark_inode_dirty(handle, inode); |
5464 | if (ret) | |
5465 | goto out_stop; | |
5466 | ||
5467 | down_write(&EXT4_I(inode)->i_data_sem); | |
27bc446e | 5468 | ext4_discard_preallocations(inode, 0); |
331573fe NJ |
5469 | |
5470 | path = ext4_find_extent(inode, offset_lblk, NULL, 0); | |
5471 | if (IS_ERR(path)) { | |
5472 | up_write(&EXT4_I(inode)->i_data_sem); | |
5473 | goto out_stop; | |
5474 | } | |
5475 | ||
5476 | depth = ext_depth(inode); | |
5477 | extent = path[depth].p_ext; | |
5478 | if (extent) { | |
5479 | ee_start_lblk = le32_to_cpu(extent->ee_block); | |
5480 | ee_len = ext4_ext_get_actual_len(extent); | |
5481 | ||
5482 | /* | |
5483 | * If offset_lblk is not the starting block of the extent, split |
5484 | * the extent at @offset_lblk |
5485 | */ | |
5486 | if ((offset_lblk > ee_start_lblk) && | |
5487 | (offset_lblk < (ee_start_lblk + ee_len))) { | |
5488 | if (ext4_ext_is_unwritten(extent)) | |
5489 | split_flag = EXT4_EXT_MARK_UNWRIT1 | | |
5490 | EXT4_EXT_MARK_UNWRIT2; | |
5491 | ret = ext4_split_extent_at(handle, inode, &path, | |
5492 | offset_lblk, split_flag, | |
5493 | EXT4_EX_NOCACHE | | |
5494 | EXT4_GET_BLOCKS_PRE_IO | | |
5495 | EXT4_GET_BLOCKS_METADATA_NOFAIL); | |
5496 | } | |
5497 | ||
5498 | ext4_ext_drop_refs(path); | |
5499 | kfree(path); | |
5500 | if (ret < 0) { | |
5501 | up_write(&EXT4_I(inode)->i_data_sem); | |
5502 | goto out_stop; | |
5503 | } | |
edf15aa1 FF |
5504 | } else { |
5505 | ext4_ext_drop_refs(path); | |
5506 | kfree(path); | |
331573fe NJ |
5507 | } |
5508 | ||
5509 | ret = ext4_es_remove_extent(inode, offset_lblk, | |
5510 | EXT_MAX_BLOCKS - offset_lblk); | |
5511 | if (ret) { | |
5512 | up_write(&EXT4_I(inode)->i_data_sem); | |
5513 | goto out_stop; | |
5514 | } | |
5515 | ||
5516 | /* | |
5517 | * if offset_lblk lies in a hole at the start of the file, use |
5518 | * ee_start_lblk to shift extents | |
5519 | */ | |
5520 | ret = ext4_ext_shift_extents(inode, handle, | |
5521 | ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk, | |
5522 | len_lblk, SHIFT_RIGHT); | |
5523 | ||
5524 | up_write(&EXT4_I(inode)->i_data_sem); | |
5525 | if (IS_SYNC(inode)) | |
5526 | ext4_handle_sync(handle); | |
67a7d5f5 JK |
5527 | if (ret >= 0) |
5528 | ext4_update_inode_fsync_trans(handle, inode, 1); | |
331573fe NJ |
5529 | |
5530 | out_stop: | |
5531 | ext4_journal_stop(handle); | |
aa75f4d3 | 5532 | ext4_fc_stop_ineligible(sb); |
ea3d7209 JK |
5533 | out_mmap: |
5534 | up_write(&EXT4_I(inode)->i_mmap_sem); | |
331573fe | 5535 | out_mutex: |
5955102c | 5536 | inode_unlock(inode); |
331573fe NJ |
5537 | return ret; |
5538 | } | |
5539 | ||
fcf6b1b7 | 5540 | /** |
c60990b3 TT |
5541 | * ext4_swap_extents() - Swap extents between two inodes |
5542 | * @handle: handle for this transaction | |
fcf6b1b7 DM |
5543 | * @inode1: First inode |
5544 | * @inode2: Second inode | |
5545 | * @lblk1: Start block for first inode | |
5546 | * @lblk2: Start block for second inode | |
5547 | * @count: Number of blocks to swap | |
dcae058a | 5548 | * @unwritten: Mark second inode's extents as unwritten after swap |
fcf6b1b7 DM |
5549 | * @erp: Pointer to save error value |
5550 | * | |
5551 | * This helper routine does exactly what is promise "swap extents". All other | |
5552 | * stuff such as page-cache locking consistency, bh mapping consistency or | |
5553 | * extent's data copying must be performed by caller. | |
5554 | * Locking: | |
5555 | * i_mutex is held for both inodes | |
5556 | * i_data_sem is locked for write for both inodes | |
5557 | * Assumptions: | |
5558 | * All pages from requested range are locked for both inodes | |
5559 | */ | |
5560 | int | |
5561 | ext4_swap_extents(handle_t *handle, struct inode *inode1, | |
dcae058a | 5562 | struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, |
fcf6b1b7 DM |
5563 | ext4_lblk_t count, int unwritten, int *erp) |
5564 | { | |
5565 | struct ext4_ext_path *path1 = NULL; | |
5566 | struct ext4_ext_path *path2 = NULL; | |
5567 | int replaced_count = 0; | |
5568 | ||
5569 | BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); | |
5570 | BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); | |
5955102c AV |
5571 | BUG_ON(!inode_is_locked(inode1)); |
5572 | BUG_ON(!inode_is_locked(inode2)); | |
fcf6b1b7 DM |
5573 | |
5574 | *erp = ext4_es_remove_extent(inode1, lblk1, count); | |
19008f6d | 5575 | if (unlikely(*erp)) |
fcf6b1b7 DM |
5576 | return 0; |
5577 | *erp = ext4_es_remove_extent(inode2, lblk2, count); | |
19008f6d | 5578 | if (unlikely(*erp)) |
fcf6b1b7 DM |
5579 | return 0; |
5580 | ||
5581 | while (count) { | |
5582 | struct ext4_extent *ex1, *ex2, tmp_ex; | |
5583 | ext4_lblk_t e1_blk, e2_blk; | |
5584 | int e1_len, e2_len, len; | |
5585 | int split = 0; | |
5586 | ||
ed8a1a76 | 5587 | path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); |
a1c83681 | 5588 | if (IS_ERR(path1)) { |
fcf6b1b7 | 5589 | *erp = PTR_ERR(path1); |
19008f6d TT |
5590 | path1 = NULL; |
5591 | finish: | |
5592 | count = 0; | |
5593 | goto repeat; | |
fcf6b1b7 | 5594 | } |
ed8a1a76 | 5595 | path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); |
a1c83681 | 5596 | if (IS_ERR(path2)) { |
fcf6b1b7 | 5597 | *erp = PTR_ERR(path2); |
19008f6d TT |
5598 | path2 = NULL; |
5599 | goto finish; | |
fcf6b1b7 DM |
5600 | } |
5601 | ex1 = path1[path1->p_depth].p_ext; | |
5602 | ex2 = path2[path2->p_depth].p_ext; | |
e4d7f2d3 | 5603 | /* Do we have something to swap? */ |
fcf6b1b7 | 5604 | if (unlikely(!ex2 || !ex1)) |
19008f6d | 5605 | goto finish; |
fcf6b1b7 DM |
5606 | |
5607 | e1_blk = le32_to_cpu(ex1->ee_block); | |
5608 | e2_blk = le32_to_cpu(ex2->ee_block); | |
5609 | e1_len = ext4_ext_get_actual_len(ex1); | |
5610 | e2_len = ext4_ext_get_actual_len(ex2); | |
5611 | ||
5612 | /* Hole handling */ | |
5613 | if (!in_range(lblk1, e1_blk, e1_len) || | |
5614 | !in_range(lblk2, e2_blk, e2_len)) { | |
5615 | ext4_lblk_t next1, next2; | |
5616 | ||
5617 | /* If the hole is after the extent, go to the next extent */ | |
5618 | next1 = ext4_ext_next_allocated_block(path1); | |
5619 | next2 = ext4_ext_next_allocated_block(path2); | |
5620 | /* If the hole is before the extent, shift to that extent */ | |
5621 | if (e1_blk > lblk1) | |
5622 | next1 = e1_blk; | |
5623 | if (e2_blk > lblk2) | |
4e562013 | 5624 | next2 = e2_blk; |
fcf6b1b7 DM |
5625 | /* Do we have something to swap? */ | |
5626 | if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS) | |
19008f6d | 5627 | goto finish; |
fcf6b1b7 DM |
5628 | /* Move to the rightmost boundary */ | |
5629 | len = next1 - lblk1; | |
5630 | if (len < next2 - lblk2) | |
5631 | len = next2 - lblk2; | |
5632 | if (len > count) | |
5633 | len = count; | |
5634 | lblk1 += len; | |
5635 | lblk2 += len; | |
5636 | count -= len; | |
5637 | goto repeat; | |
5638 | } | |
5639 | ||
5640 | /* Prepare left boundary */ | |
5641 | if (e1_blk < lblk1) { | |
5642 | split = 1; | |
5643 | *erp = ext4_force_split_extent_at(handle, inode1, | |
dfe50809 | 5644 | &path1, lblk1, 0); |
19008f6d TT |
5645 | if (unlikely(*erp)) |
5646 | goto finish; | |
fcf6b1b7 DM |
5647 | } |
5648 | if (e2_blk < lblk2) { | |
5649 | split = 1; | |
5650 | *erp = ext4_force_split_extent_at(handle, inode2, | |
dfe50809 | 5651 | &path2, lblk2, 0); |
19008f6d TT |
5652 | if (unlikely(*erp)) |
5653 | goto finish; | |
fcf6b1b7 | 5654 | } |
dfe50809 | 5655 | /* ext4_split_extent_at() may result in leaf extent split, |
fcf6b1b7 DM |
5656 | * path must be revalidated. */ | |
5657 | if (split) | |
5658 | goto repeat; | |
5659 | ||
5660 | /* Prepare right boundary */ | |
5661 | len = count; | |
5662 | if (len > e1_blk + e1_len - lblk1) | |
5663 | len = e1_blk + e1_len - lblk1; | |
5664 | if (len > e2_blk + e2_len - lblk2) | |
5665 | len = e2_blk + e2_len - lblk2; | |
5666 | ||
5667 | if (len != e1_len) { | |
5668 | split = 1; | |
5669 | *erp = ext4_force_split_extent_at(handle, inode1, | |
dfe50809 | 5670 | &path1, lblk1 + len, 0); |
19008f6d TT |
5671 | if (unlikely(*erp)) |
5672 | goto finish; | |
fcf6b1b7 DM |
5673 | } |
5674 | if (len != e2_len) { | |
5675 | split = 1; | |
5676 | *erp = ext4_force_split_extent_at(handle, inode2, | |
dfe50809 | 5677 | &path2, lblk2 + len, 0); |
fcf6b1b7 | 5678 | if (*erp) |
19008f6d | 5679 | goto finish; |
fcf6b1b7 | 5680 | } |
dfe50809 | 5681 | /* ext4_split_extent_at() may result in leaf extent split, |
fcf6b1b7 DM |
5682 | * path must be revalidated. */ | |
5683 | if (split) | |
5684 | goto repeat; | |
5685 | ||
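		/* After the splits above, both extents exactly cover the len blocks being swapped */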
5686 | BUG_ON(e2_len != e1_len); | |
5687 | *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth); | |
19008f6d TT |
5688 | if (unlikely(*erp)) |
5689 | goto finish; | |
fcf6b1b7 | 5690 | *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth); |
19008f6d TT |
5691 | if (unlikely(*erp)) |
5692 | goto finish; | |
fcf6b1b7 DM |
5693 | |
5694 | /* Both extents are fully inside the boundaries. Swap them now */ | |
5695 | tmp_ex = *ex1; | |
5696 | ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2)); | |
5697 | ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex)); | |
5698 | ex1->ee_len = cpu_to_le16(e2_len); | |
5699 | ex2->ee_len = cpu_to_le16(e1_len); | |
5700 | if (unwritten) | |
5701 | ext4_ext_mark_unwritten(ex2); | |
5702 | if (ext4_ext_is_unwritten(&tmp_ex)) | |
5703 | ext4_ext_mark_unwritten(ex1); | |
5704 | ||
5705 | ext4_ext_try_to_merge(handle, inode2, path2, ex2); | |
5706 | ext4_ext_try_to_merge(handle, inode1, path1, ex1); | |
5707 | *erp = ext4_ext_dirty(handle, inode2, path2 + | |
5708 | path2->p_depth); | |
19008f6d TT |
5709 | if (unlikely(*erp)) |
5710 | goto finish; | |
fcf6b1b7 DM |
5711 | *erp = ext4_ext_dirty(handle, inode1, path1 + |
5712 | path1->p_depth); | |
5713 | /* | |
5714 | * Looks scary, eh? The second inode already points to the new blocks, | |
5715 | * and it was successfully dirtied. Fortunately, an error can happen | |
5716 | * here only due to a journal error, so the full transaction will be | |
5717 | * aborted anyway. | |
5718 | */ | |
19008f6d TT |
5719 | if (unlikely(*erp)) |
5720 | goto finish; | |
fcf6b1b7 DM |
5721 | lblk1 += len; |
5722 | lblk2 += len; | |
5723 | replaced_count += len; | |
5724 | count -= len; | |
5725 | ||
5726 | repeat: | |
b7ea89ad TT |
5727 | ext4_ext_drop_refs(path1); |
5728 | kfree(path1); | |
5729 | ext4_ext_drop_refs(path2); | |
5730 | kfree(path2); | |
5731 | path1 = path2 = NULL; | |
fcf6b1b7 | 5732 | } |
fcf6b1b7 DM |
5733 | return replaced_count; |
5734 | } | |
0b02f4c0 EW |
5735 | |
5736 | /* | |
5737 | * ext4_clu_mapped - determine whether any block in a logical cluster has | |
5738 | * been mapped to a physical cluster | |
5739 | * | |
5740 | * @inode - file containing the logical cluster | |
5741 | * @lclu - logical cluster of interest | |
5742 | * | |
5743 | * Returns 1 if any block in the logical cluster is mapped, signifying | |
5744 | * that a physical cluster has been allocated for it. Otherwise, | |
5745 | * returns 0. Can also return negative error codes. Derived from | |
5746 | * ext4_ext_map_blocks(). | |
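 *
 * Worked example (illustrative numbers): with a cluster ratio of 16
 * blocks per cluster, lclu 3 covers logical blocks 48..63, since
 * EXT4_C2B(sbi, 3) == 48. An extent starting at logical block 60 with
 * length 8 spans clusters EXT4_B2C(sbi, 60) == 3 through
 * EXT4_B2C(sbi, 67) == 4, so this function returns 1 for lclu 3.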
5747 | */ | |
5748 | int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu) | |
5749 | { | |
5750 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | |
5751 | struct ext4_ext_path *path; | |
5752 | int depth, mapped = 0, err = 0; | |
5753 | struct ext4_extent *extent; | |
5754 | ext4_lblk_t first_lblk, first_lclu, last_lclu; | |
5755 | ||
5756 | /* search for the extent closest to the first block in the cluster */ | |
5757 | path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0); | |
5758 | if (IS_ERR(path)) { | |
5759 | err = PTR_ERR(path); | |
5760 | path = NULL; | |
5761 | goto out; | |
5762 | } | |
5763 | ||
5764 | depth = ext_depth(inode); | |
5765 | ||
5766 | /* | |
5767 | * A consistent leaf must not be empty. This situation is possible, | |
5768 | * though, _during_ tree modification, and it's why an assert can't | |
5769 | * be put in ext4_find_extent(). | |
5770 | */ | |
5771 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { | |
5772 | EXT4_ERROR_INODE(inode, | |
5773 | "bad extent address - lblock: %lu, depth: %d, pblock: %lld", | |
5774 | (unsigned long) EXT4_C2B(sbi, lclu), | |
5775 | depth, path[depth].p_block); | |
5776 | err = -EFSCORRUPTED; | |
5777 | goto out; | |
5778 | } | |
5779 | ||
5780 | extent = path[depth].p_ext; | |
5781 | ||
5782 | /* can't be mapped if the extent tree is empty */ | |
5783 | if (extent == NULL) | |
5784 | goto out; | |
5785 | ||
5786 | first_lblk = le32_to_cpu(extent->ee_block); | |
5787 | first_lclu = EXT4_B2C(sbi, first_lblk); | |
5788 | ||
5789 | /* | |
5790 | * Three possible outcomes at this point - found extent spanning | |
5791 | * the target cluster, to the left of the target cluster, or to the | |
5792 | * right of the target cluster. The first two cases are handled here. | |
5793 | * The last case indicates the target cluster is not mapped. | |
5794 | */ | |
5795 | if (lclu >= first_lclu) { | |
5796 | last_lclu = EXT4_B2C(sbi, first_lblk + | |
5797 | ext4_ext_get_actual_len(extent) - 1); | |
5798 | if (lclu <= last_lclu) { | |
5799 | mapped = 1; | |
5800 | } else { | |
5801 | first_lblk = ext4_ext_next_allocated_block(path); | |
5802 | first_lclu = EXT4_B2C(sbi, first_lblk); | |
5803 | if (lclu == first_lclu) | |
5804 | mapped = 1; | |
5805 | } | |
5806 | } | |
5807 | ||
5808 | out: | |
5809 | ext4_ext_drop_refs(path); | |
5810 | kfree(path); | |
5811 | ||
5812 | return err ? err : mapped; | |
5813 | } | |
8016e29f HS |
5814 | |
5815 | /* | |
5816 | * Updates the physical block address and unwritten status of the extent | |
5817 | * starting at logical block @start with length @len. If no such extent exists, | |
5818 | * this function splits the extent tree appropriately to create an | |
5819 | * extent like this. This function is called in the fast commit | |
5820 | * replay path. Returns 0 on success and error on failure. | |
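 *
 * Illustrative example (hypothetical numbers): if the tree holds a
 * single extent [ee_block=10, len=16] and replay asks for start=12,
 * len=4, the extent is split first at block 12 and then at block 16,
 * leaving an extent [12, 4] whose pblk and unwritten bit can then be
 * updated in place.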
5821 | */ | |
5822 | int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start, | |
5823 | int len, int unwritten, ext4_fsblk_t pblk) | |
5824 | { | |
5825 | struct ext4_ext_path *path = NULL, *ppath; | |
5826 | struct ext4_extent *ex; | |
5827 | int ret; | |
5828 | ||
5829 | path = ext4_find_extent(inode, start, NULL, 0); | |
bc18546b DC |
5830 | if (IS_ERR(path)) |
5831 | return PTR_ERR(path); | |
8016e29f HS |
5832 | ex = path[path->p_depth].p_ext; |
5833 | if (!ex) { | |
5834 | ret = -EFSCORRUPTED; | |
5835 | goto out; | |
5836 | } | |
5837 | ||
5838 | if (le32_to_cpu(ex->ee_block) != start || | |
5839 | ext4_ext_get_actual_len(ex) != len) { | |
5840 | /* We need to split this extent to match our extent first */ | |
5841 | ppath = path; | |
5842 | down_write(&EXT4_I(inode)->i_data_sem); | |
5843 | ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1); | |
5844 | up_write(&EXT4_I(inode)->i_data_sem); | |
5845 | if (ret) | |
5846 | goto out; | |
5847 | kfree(path); | |
5848 | path = ext4_find_extent(inode, start, NULL, 0); | |
5849 | if (IS_ERR(path)) | |
5850 | return PTR_ERR(path); | |
5851 | ppath = path; | |
5852 | ex = path[path->p_depth].p_ext; | |
5853 | WARN_ON(le32_to_cpu(ex->ee_block) != start); | |
5854 | if (ext4_ext_get_actual_len(ex) != len) { | |
5855 | down_write(&EXT4_I(inode)->i_data_sem); | |
5856 | ret = ext4_force_split_extent_at(NULL, inode, &ppath, | |
5857 | start + len, 1); | |
5858 | up_write(&EXT4_I(inode)->i_data_sem); | |
5859 | if (ret) | |
5860 | goto out; | |
5861 | kfree(path); | |
5862 | path = ext4_find_extent(inode, start, NULL, 0); | |
5863 | if (IS_ERR(path)) | |
5864 | return PTR_ERR(path); | |
5865 | ex = path[path->p_depth].p_ext; | |
5866 | } | |
5867 | } | |
5868 | if (unwritten) | |
5869 | ext4_ext_mark_unwritten(ex); | |
5870 | else | |
5871 | ext4_ext_mark_initialized(ex); | |
5872 | ext4_ext_store_pblock(ex, pblk); | |
5873 | down_write(&EXT4_I(inode)->i_data_sem); | |
5874 | ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]); | |
5875 | up_write(&EXT4_I(inode)->i_data_sem); | |
5876 | out: | |
5877 | ext4_ext_drop_refs(path); | |
5878 | kfree(path); | |
5879 | ext4_mark_inode_dirty(NULL, inode); | |
5880 | return ret; | |
5881 | } | |
5882 | ||
5883 | /* Try to shrink the extent tree by merging adjacent extents */ | |
5884 | void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end) | |
5885 | { | |
5886 | struct ext4_ext_path *path = NULL; | |
5887 | struct ext4_extent *ex; | |
5888 | ext4_lblk_t old_cur, cur = 0; | |
5889 | ||
5890 | while (cur < end) { | |
5891 | path = ext4_find_extent(inode, cur, NULL, 0); | |
5892 | if (IS_ERR(path)) | |
5893 | return; | |
5894 | ex = path[path->p_depth].p_ext; | |
5895 | if (!ex) { | |
5896 | ext4_ext_drop_refs(path); | |
5897 | kfree(path); | |
5898 | ext4_mark_inode_dirty(NULL, inode); | |
5899 | return; | |
5900 | } | |
5901 | old_cur = cur; | |
5902 | cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); | |
5903 | if (cur <= old_cur) | |
5904 | cur = old_cur + 1; | |
5905 | ext4_ext_try_to_merge(NULL, inode, path, ex); | |
5906 | down_write(&EXT4_I(inode)->i_data_sem); | |
5907 | ext4_ext_dirty(NULL, inode, &path[path->p_depth]); | |
5908 | up_write(&EXT4_I(inode)->i_data_sem); | |
5909 | ext4_mark_inode_dirty(NULL, inode); | |
5910 | ext4_ext_drop_refs(path); | |
5911 | kfree(path); | |
5912 | } | |
5913 | } | |
5914 | ||
5915 | /* Check if *cur is a hole and if it is, skip it */ | |
1fd95c05 | 5916 | static int skip_hole(struct inode *inode, ext4_lblk_t *cur) |
8016e29f HS |
5917 | { |
5918 | int ret; | |
5919 | struct ext4_map_blocks map; | |
5920 | ||
5921 | map.m_lblk = *cur; | |
5922 | map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur; | |
5923 | ||
5924 | ret = ext4_map_blocks(NULL, inode, &map, 0); | |
1fd95c05 TT |
5925 | if (ret < 0) |
5926 | return ret; | |
8016e29f | 5927 | if (ret != 0) |
1fd95c05 | 5928 | return 0; |
8016e29f | 5929 | *cur = *cur + map.m_len; |
1fd95c05 | 5930 | return 0; |
8016e29f HS |
5931 | } |
5932 | ||
5933 | /* Count the number of blocks used by this inode and update i_blocks */ | |
5934 | int ext4_ext_replay_set_iblocks(struct inode *inode) | |
5935 | { | |
5936 | struct ext4_ext_path *path = NULL, *path2 = NULL; | |
5937 | struct ext4_extent *ex; | |
5938 | ext4_lblk_t cur = 0, end; | |
5939 | int numblks = 0, i, ret = 0; | |
5940 | ext4_fsblk_t cmp1, cmp2; | |
5941 | struct ext4_map_blocks map; | |
5942 | ||
5943 | /* Determine the size of the file first */ | |
5944 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, | |
5945 | EXT4_EX_NOCACHE); | |
5946 | if (IS_ERR(path)) | |
5947 | return PTR_ERR(path); | |
5948 | ex = path[path->p_depth].p_ext; | |
5949 | if (!ex) { | |
5950 | ext4_ext_drop_refs(path); | |
5951 | kfree(path); | |
5952 | goto out; | |
5953 | } | |
5954 | end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); | |
5955 | ext4_ext_drop_refs(path); | |
5956 | kfree(path); | |
5957 | ||
5958 | /* Count the number of data blocks */ | |
5959 | cur = 0; | |
5960 | while (cur < end) { | |
5961 | map.m_lblk = cur; | |
5962 | map.m_len = end - cur; | |
5963 | ret = ext4_map_blocks(NULL, inode, &map, 0); | |
5964 | if (ret < 0) | |
5965 | break; | |
5966 | if (ret > 0) | |
5967 | numblks += ret; | |
5968 | cur = cur + map.m_len; | |
5969 | } | |
5970 | ||
5971 | /* | |
5972 | * Count the number of extent tree blocks. We do it by looking up | |
5973 | * two successive extents and determining the difference between | |
5974 | * their paths. When the paths of two successive extents differ, | |
5975 | * we compare the blocks in the path at each level and increment | |
5976 | * iblocks by the total number of differences found. | |
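 *
 * For example (illustrative): in a depth-1 tree, two successive
 * extents that live in different leaf blocks yield paths that differ
 * at level 1, so numblks is incremented once for the second leaf.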
5977 | */ | |
5978 | cur = 0; | |
1fd95c05 TT |
5979 | ret = skip_hole(inode, &cur); |
5980 | if (ret < 0) | |
5981 | goto out; | |
8016e29f HS |
5982 | path = ext4_find_extent(inode, cur, NULL, 0); |
5983 | if (IS_ERR(path)) | |
5984 | goto out; | |
5985 | numblks += path->p_depth; | |
5986 | ext4_ext_drop_refs(path); | |
5987 | kfree(path); | |
5988 | while (cur < end) { | |
5989 | path = ext4_find_extent(inode, cur, NULL, 0); | |
5990 | if (IS_ERR(path)) | |
5991 | break; | |
5992 | ex = path[path->p_depth].p_ext; | |
5993 | if (!ex) { | |
5994 | ext4_ext_drop_refs(path); | |
5995 | kfree(path); | |
5996 | return 0; | |
5997 | } | |
5998 | cur = max(cur + 1, le32_to_cpu(ex->ee_block) + | |
5999 | ext4_ext_get_actual_len(ex)); | |
1fd95c05 TT |
6000 | ret = skip_hole(inode, &cur); |
6001 | if (ret < 0) { | |
6002 | ext4_ext_drop_refs(path); | |
6003 | kfree(path); | |
6004 | break; | |
6005 | } | |
8016e29f HS |
6006 | path2 = ext4_find_extent(inode, cur, NULL, 0); |
6007 | if (IS_ERR(path2)) { | |
6008 | ext4_ext_drop_refs(path); | |
6009 | kfree(path); | |
6010 | break; | |
6011 | } | |
8016e29f HS |
6012 | for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) { |
6013 | cmp1 = cmp2 = 0; | |
6014 | if (i <= path->p_depth) | |
6015 | cmp1 = path[i].p_bh ? | |
6016 | path[i].p_bh->b_blocknr : 0; | |
6017 | if (i <= path2->p_depth) | |
6018 | cmp2 = path2[i].p_bh ? | |
6019 | path2[i].p_bh->b_blocknr : 0; | |
6020 | if (cmp1 != cmp2 && cmp2 != 0) | |
6021 | numblks++; | |
6022 | } | |
6023 | ext4_ext_drop_refs(path); | |
6024 | ext4_ext_drop_refs(path2); | |
6025 | kfree(path); | |
6026 | kfree(path2); | |
6027 | } | |
6028 | ||
6029 | out: | |
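	/* i_blocks counts 512-byte sectors, hence the (blocksize_bits - 9) shift */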
6030 | inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9); | |
6031 | ext4_mark_inode_dirty(NULL, inode); | |
6032 | return 0; | |
6033 | } | |
6034 | ||
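/*
 * Clear the in-memory block bitmap bits for every block referenced by
 * this inode's extent tree -- the data blocks of each mapped extent as
 * well as the extent tree blocks along each path. Used by the fast
 * commit replay path.
 */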
6035 | int ext4_ext_clear_bb(struct inode *inode) | |
6036 | { | |
6037 | struct ext4_ext_path *path = NULL; | |
6038 | struct ext4_extent *ex; | |
6039 | ext4_lblk_t cur = 0, end; | |
6040 | int j, ret = 0; | |
6041 | struct ext4_map_blocks map; | |
6042 | ||
6043 | /* Determine the size of the file first */ | |
6044 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, | |
6045 | EXT4_EX_NOCACHE); | |
6046 | if (IS_ERR(path)) | |
6047 | return PTR_ERR(path); | |
6048 | ex = path[path->p_depth].p_ext; | |
6049 | if (!ex) { | |
6050 | ext4_ext_drop_refs(path); | |
6051 | kfree(path); | |
6052 | return 0; | |
6053 | } | |
6054 | end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); | |
6055 | ext4_ext_drop_refs(path); | |
6056 | kfree(path); | |
6057 | ||
6058 | cur = 0; | |
6059 | while (cur < end) { | |
6060 | map.m_lblk = cur; | |
6061 | map.m_len = end - cur; | |
6062 | ret = ext4_map_blocks(NULL, inode, &map, 0); | |
6063 | if (ret < 0) | |
6064 | break; | |
6065 | if (ret > 0) { | |
6066 | path = ext4_find_extent(inode, map.m_lblk, NULL, 0); | |
6067 | if (!IS_ERR_OR_NULL(path)) { | |
6068 | for (j = 0; j < path->p_depth; j++) { | |
6069 | ||
6070 | ext4_mb_mark_bb(inode->i_sb, | |
6071 | path[j].p_block, 1, 0); | |
6072 | } | |
6073 | ext4_ext_drop_refs(path); | |
6074 | kfree(path); | |
6075 | } | |
6076 | ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0); | |
6077 | } | |
6078 | cur = cur + map.m_len; | |
6079 | } | |
6080 | ||
6081 | return 0; | |
6082 | } |