// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks. This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism. At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible. This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite). If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock. (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.) delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
 *
 * As an optimization, the CoW extent size hint (cowextsz) creates
 * outsized aligned delalloc reservations in the hope of landing out of
 * order nearby CoW writes in a single extent on disk, thereby reducing
 * fragmentation and improving future performance.
 *
 * D: --RRRRRRSSSRRRRRRRR--- (data fork)
 * C: ------DDDDDDD--------- (CoW fork)
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into unwritten mappings by
 * allocating blocks and replacing the delalloc mapping with real ones.
 * A delalloc mapping can be replaced by several unwritten ones if the
 * free space is fragmented.
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUUUUUU---------
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar. The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded. IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork. This will be discussed shortly.
 *
 * For now, unaligned directio writes will be bounced back to the page cache.
 * Block-aligned directio writes will use the same mechanism as buffered
 * writes.
 *
 * Just prior to submitting the actual disk write requests, we convert
 * the extents representing the range of the file actually being written
 * (as opposed to extra pieces created for the cowextsize hint) to real
 * extents. This will become important in the next step:
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUrrUUU---------
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written. Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd. For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork. Because of
 * the presence of the cowextsize hint, however, we must be careful
 * only to remap the blocks that we've actually written out -- we must
 * never remap delalloc reservations nor CoW staging blocks that have
 * yet to be written. This corresponds exactly to the real extents in
 * the CoW fork:
 *
 * D: --RRRRRRrrSRRRRRRRR---
 * C: ------UU--UUU---------
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type. This is required for direct io
 * because we only have ioend for the whole dio, and we have to be able to
 * remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure. Better yet, the more ground we can cover with one
 * ioend, the better.
 */
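
/*
 * Roughly, in the diagrams above, 'D:' and 'C:' label the data and CoW
 * forks; within each row, 'R' marks ordinary written blocks, 'S' the
 * shared blocks targeted by the write, 'D' a delalloc reservation, 'U'
 * unwritten CoW staging extents, and lowercase 'r' staging blocks that
 * have been converted to written extents and/or remapped into the data
 * fork.
 */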

/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen. If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks. If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
int
xfs_reflink_find_shared(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;

	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agbp->b_pag);

	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
			find_end_of_shared);

	xfs_btree_del_cursor(cur, error);

	xfs_trans_brelse(tp, agbp);
	return error;
}

/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status. More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping. If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent. If
 * the shared region intersects the mapping, trim the mapping to the
 * start of the shared extent. If there are no shared regions that
 * overlap, just return the original extent.
 */
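/*
 * For example, if the mapping covers agbno 100 for 20 blocks and blocks
 * 100-109 of that range are shared, the mapping is trimmed to 10 blocks
 * and *shared is set; if instead only blocks 105-109 are shared, the
 * mapping is trimmed to the 5 unshared blocks at 100-104 and *shared
 * stays false, leaving the shared region for a later iteration.
 */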
int
xfs_reflink_trim_around_shared(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	bool			*shared)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error = 0;

	/* Holes, unwritten, and delalloc extents cannot be shared */
	if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) {
		*shared = false;
		return 0;
	}

	trace_xfs_reflink_trim_around_shared(ip, irec);

	agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
	aglen = irec->br_blockcount;

	error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
			aglen, &fbno, &flen, true);
	if (error)
		return error;

	*shared = false;
	if (fbno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (fbno == agbno) {
		/*
		 * The start of this extent is shared. Truncate the
		 * mapping at the end of the shared region so that a
		 * subsequent iteration starts at the start of the
		 * unshared region.
		 */
		irec->br_blockcount = flen;
		*shared = true;
		return 0;
	} else {
		/*
		 * There's a shared extent midway through this extent.
		 * Truncate the mapping at the start of the shared
		 * extent so that a subsequent iteration starts at the
		 * start of the shared region.
		 */
		irec->br_blockcount = fbno - agbno;
		return 0;
	}
}

int
xfs_bmap_trim_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared)
{
	/* We can't update any real extents in always COW mode. */
	if (xfs_is_always_cow_inode(ip) &&
	    !isnullstartblock(imap->br_startblock)) {
		*shared = true;
		return 0;
	}

	/* Trim the mapping to the nearest shared extent boundary. */
	return xfs_reflink_trim_around_shared(ip, imap, shared);
}

static int
xfs_reflink_convert_cow_locked(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	struct xfs_btree_cur	*dummy_cur = NULL;
	int			dummy_logflags;
	int			error = 0;

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		return 0;

	do {
		if (got.br_startoff >= offset_fsb + count_fsb)
			break;
		if (got.br_state == XFS_EXT_NORM)
			continue;
		if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
			return -EIO;

		xfs_trim_extent(&got, offset_fsb, count_fsb);
		if (!got.br_blockcount)
			continue;

		got.br_state = XFS_EXT_NORM;
		error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
				XFS_COW_FORK, &icur, &dummy_cur, &got,
				&dummy_logflags);
		if (error)
			return error;
	} while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));

	return error;
}

/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	int			error;

	ASSERT(count != 0);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Find the extent that maps the given range in the COW fork. Even if the
 * extent is not shared we might have a preallocation for it in the COW fork.
 * If so we use that rather than trigger a new allocation.
 */
static int
xfs_find_trim_cow_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	bool			*found)
{
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_iext_cursor	icur;

	*found = false;

	/*
	 * If we don't find an overlapping extent, trim the range we need to
	 * allocate to fit the hole we found.
	 */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap))
		cmap->br_startoff = offset_fsb + count_fsb;
	if (cmap->br_startoff > offset_fsb) {
		xfs_trim_extent(imap, imap->br_startoff,
				cmap->br_startoff - imap->br_startoff);
		return xfs_bmap_trim_cow(ip, imap, shared);
	}

	*shared = true;
	if (isnullstartblock(cmap->br_startblock)) {
		xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount);
		return 0;
	}

	/* real extent found - no need to allocate */
	xfs_trim_extent(cmap, offset_fsb, count_fsb);
	*found = true;
	return 0;
}

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_trans	*tp;
	int			nimaps, error = 0;
	bool			found;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		return error;
	if (found)
		goto convert;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
		imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	xfs_iunlock(ip, *lockmode);
	*lockmode = 0;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	*lockmode = XFS_ILOCK_EXCL;

	/*
	 * Check for an overlapping extent again now that we dropped the ilock.
	 */
	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;
	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
			&nimaps);
	if (error)
		goto out_trans_cancel;

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/*
	 * Allocation succeeded but the requested range was not even partially
	 * satisfied? Bail out!
	 */
	if (nimaps == 0)
		return -ENOSPC;
convert:
	xfs_trim_extent(cmap, offset_fsb, count_fsb);
	/*
	 * COW fork extents are supposed to remain unwritten until we're ready
	 * to initiate a disk write. For direct I/O we are going to write the
	 * data and need the conversion, but for buffered writes we're done.
	 */
	if (!convert_now || cmap->br_state == XFS_EXT_NORM)
		return 0;
	trace_xfs_reflink_convert_cow(ip, cmap);
	return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	if (!xfs_inode_has_cow_data(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&icur, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);

			/* Free the CoW orphan record. */
			xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
					del.br_blockcount);

			xfs_bmap_add_free(*tpp, del.br_startblock,
					del.br_blockcount, NULL);

			/* Roll the transaction */
			error = xfs_defer_finish(tpp);
			if (error)
				break;

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

			/* Remove the quota reservation */
			error = xfs_quota_unreserve_blkres(ip,
					del.br_blockcount);
			if (error)
				break;
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);
	return error;
}

/*
 * Cancel CoW reservations for some byte range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			cancel_real)
{
	struct xfs_trans	*tp;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	trace_xfs_reflink_cancel_cow_range(ip, offset, count);
	ASSERT(ip->i_cowfp);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (count == NULLFILEOFF)
		end_fsb = NULLFILEOFF;
	else
		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			0, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Scrape out the old CoW reservations */
	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
			cancel_real);
	if (error)
		goto out_cancel;

	error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the end of the
 * range) and update @end_fsb appropriately. Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		*end_fsb)
{
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	xfs_filblks_t		rlen;
	unsigned int		resblks;
	int			error;

	/* No COW extents? That's easy! */
	if (ifp->if_bytes == 0) {
		*end_fsb = offset_fsb;
		return 0;
	}

	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Lock the inode. We have to ijoin without automatic unlock because
	 * the lead transaction is the refcountbt record deletion; the data
	 * fork update follows as a deferred log item.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		goto out_cancel;

	/*
	 * In case of racing, overlapping AIO writes, no COW extents might be
	 * left by the time I/O completes for the loser of the race. In that
	 * case we are done.
	 */
	if (!xfs_iext_lookup_extent_before(ip, ifp, end_fsb, &icur, &got) ||
	    got.br_startoff + got.br_blockcount <= offset_fsb) {
		*end_fsb = offset_fsb;
		goto out_cancel;
	}

	/*
	 * Structure copy @got into @del, then trim @del to the range that we
	 * were asked to remap. We preserve @got for the eventual CoW fork
	 * deletion; from now on @del represents the mapping that we're
	 * actually remapping.
	 */
	del = got;
	xfs_trim_extent(&del, offset_fsb, *end_fsb - offset_fsb);

	ASSERT(del.br_blockcount > 0);

	/*
	 * Only remap real extents that contain data. With AIO, speculative
	 * preallocations can leak into the range we are called upon, and we
	 * need to skip them.
	 */
	if (!xfs_bmap_is_written_extent(&got)) {
		*end_fsb = del.br_startoff;
		goto out_cancel;
	}

	/* Unmap the old blocks in the data fork. */
	rlen = del.br_blockcount;
	error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
	if (error)
		goto out_cancel;

	/* Trim the extent to whatever got unmapped. */
	xfs_trim_extent(&del, del.br_startoff + rlen, del.br_blockcount - rlen);
	trace_xfs_reflink_cow_remap(ip, &del);

	/* Free the CoW orphan record. */
	xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, &del);

	/* Charge this new data fork mapping to the on-disk quota. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
			(long)del.br_blockcount);

	/* Remove the mapping from the CoW fork. */
	xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/* Update the caller about how much progress we made. */
	*end_fsb = del.br_startoff;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	trace_xfs_reflink_end_cow(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Walk backwards until we're out of the I/O range. The loop function
	 * repeatedly cycles the ILOCK to allocate one transaction per remapped
	 * extent.
	 *
	 * If we're being called by writeback then the pages will still
	 * have PageWriteback set, which prevents races with reflink remapping
	 * and truncate. Reflink remapping prevents races with writeback by
	 * taking the iolock and mmaplock before flushing the pages and
	 * remapping, which means there won't be any further writeback or page
	 * cache dirtying until the reflink completes.
	 *
	 * We should never have two threads issuing writeback for the same file
	 * region. There are also post-eof checks in the writeback
	 * preparation code so that we don't bother writing out pages that are
	 * about to be truncated.
	 *
	 * If we're being called as part of directio write completion, the dio
	 * count is still elevated, which reflink and truncate will wait for.
	 * Reflink remapping takes the iolock and mmaplock and waits for
	 * pending dio to finish, which should prevent any directio until the
	 * remap completes. Multiple concurrent directio writes to the same
	 * region are handled by end_cow processing only occurring for the
	 * threads which succeed; the outcome of multiple overlapping direct
	 * writes is not well defined anyway.
	 *
	 * It's possible that a buffered write and a direct write could collide
	 * here (the buffered write stumbles in after the dio flushes and
	 * invalidates the page cache and immediately queues writeback), but we
	 * have never supported this 100%. If either disk write succeeds the
	 * blocks will be remapped.
	 */
	while (end_fsb > offset_fsb && !error)
		error = xfs_reflink_end_cow_extent(ip, offset_fsb, &end_fsb);

	if (error)
		trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Free leftover CoW reservations that didn't get cleaned out.
 */
int
xfs_reflink_recover_cow(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error = 0;

	if (!xfs_has_reflink(mp))
		return 0;

	for_each_perag(mp, agno, pag) {
		error = xfs_refcount_recover_cow_leftovers(mp, pag);
		if (error) {
			xfs_perag_put(pag);
			break;
		}
	}

	return error;
}

/*
 * Reflinking (Block) Ranges of Two Files Together
 *
 * First, ensure that the reflink flag is set on both inodes. The flag is an
 * optimization to avoid unnecessary refcount btree lookups in the write path.
 *
 * Now we can iteratively remap the range of extents (and holes) in src to the
 * corresponding ranges in dest. Let drange and srange denote the ranges of
 * logical blocks in dest and src touched by the reflink operation.
 *
 * While the length of drange is greater than zero,
 *    - Read src's bmbt at the start of srange ("imap")
 *    - If imap doesn't exist, make imap appear to start at the end of srange
 *      with zero length.
 *    - If imap starts before srange, advance imap to start at srange.
 *    - If imap goes beyond srange, truncate imap to end at the end of srange.
 *    - Punch (imap start - srange start + imap len) blocks from dest at
 *      offset (drange start).
 *    - If imap points to a real range of pblks,
 *         > Increase the refcount of the imap's pblks
 *         > Map imap's pblks into dest at the offset
 *           (drange start + imap start - srange start)
 *    - Advance drange and srange by (imap start - srange start + imap len)
 *
 * Finally, if the reflink made dest longer, update both the in-core and
 * on-disk file sizes.
 *
 * ASCII Art Demonstration:
 *
 * Let's say we want to reflink this source file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS (src file)
 * <-------------------->
 *
 * into this destination file:
 *
 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
 * <-------------------->
 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
 * Observe that the range has different logical offsets in either file.
 *
 * Consider that the first extent in the source file doesn't line up with our
 * reflink range. Unmapping and remapping are separate operations, so we can
 * unmap more blocks from the destination file than we remap.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <------->
 * --DDDDD---------DDDDD--DDD
 * <------->
 *
 * Now remap the source extent into the destination file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <------->
 * --DDDDD--SSSSSSSDDDDD--DDD
 * <------->
 *
 * Do likewise with the second hole and extent in our range. Holes in the
 * unmap range don't affect our operation.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <---->
 * --DDDDD--SSSSSSS-SSSSS-DDD
 * <---->
 *
 * Finally, unmap and remap part of the third extent. This will increase the
 * size of the destination file.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <----->
 * --DDDDD--SSSSSSS-SSSSS----SSS
 * <----->
 *
 * Once we update the destination file's i_size, we're done.
 */

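/*
 * Roughly speaking, userspace reaches this remap path through the FICLONE
 * and FICLONERANGE ioctls (and clone-aware copy_file_range()). A minimal
 * illustrative caller, with placeholder fds and offsets, might be:
 *
 *	struct file_clone_range fcr = {
 *		.src_fd		= src_fd,
 *		.src_offset	= 0,
 *		.src_length	= 0,	// zero length means "to source EOF"
 *		.dest_offset	= 0,
 *	};
 *	ioctl(dest_fd, FICLONERANGE, &fcr);
 */
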
/*
 * Ensure the reflink bit is set in both inodes.
 */
STATIC int
xfs_reflink_set_inode_flag(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	struct xfs_mount	*mp = src->i_mount;
	int			error;
	struct xfs_trans	*tp;

	if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	/* Lock both files against IO */
	if (src->i_ino == dest->i_ino)
		xfs_ilock(src, XFS_ILOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);

	if (!xfs_is_reflink_inode(src)) {
		trace_xfs_reflink_set_inode_flag(src);
		xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
		src->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
		xfs_ifork_init_cow(src);
	} else
		xfs_iunlock(src, XFS_ILOCK_EXCL);

	if (src->i_ino == dest->i_ino)
		goto commit_flags;

	if (!xfs_is_reflink_inode(dest)) {
		trace_xfs_reflink_set_inode_flag(dest);
		xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
		dest->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
		xfs_ifork_init_cow(dest);
	} else
		xfs_iunlock(dest, XFS_ILOCK_EXCL);

commit_flags:
	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Update destination inode size & cowextsize hint, if necessary.
 */
int
xfs_reflink_update_dest(
	struct xfs_inode	*dest,
	xfs_off_t		newlen,
	xfs_extlen_t		cowextsize,
	unsigned int		remap_flags)
{
	struct xfs_mount	*mp = dest->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	xfs_ilock(dest, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);

	if (newlen > i_size_read(VFS_I(dest))) {
		trace_xfs_reflink_update_inode_size(dest, newlen);
		i_size_write(VFS_I(dest), newlen);
		dest->i_disk_size = newlen;
	}

	if (cowextsize) {
		dest->i_cowextsize = cowextsize;
		dest->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
	}

	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Do we have enough reserve in this AG to handle a reflink? The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			error = 0;

	if (!xfs_has_rmapbt(mp))
		return 0;

	pag = xfs_perag_get(mp, agno);
	if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
	    xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
		error = -ENOSPC;
	xfs_perag_put(pag);
	return error;
}

/*
 * Remap the given extent into the file. The dmap blockcount will be set to
 * the number of blocks that were actually remapped.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*dmap,
	xfs_off_t		new_isize)
{
	struct xfs_bmbt_irec	smap;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_off_t		newlen;
	int64_t			qdelta = 0;
	unsigned int		resblks;
	bool			quota_reserved = true;
	bool			smap_real;
	bool			dmap_written = xfs_bmap_is_written_extent(dmap);
	int			iext_delta = 0;
	int			nimaps;
	int			error;

	/*
	 * Start a rolling transaction to switch the mappings.
	 *
	 * Adding a written extent to the extent map can cause a bmbt split,
	 * and removing a mapped extent from the extent map can cause a bmbt
	 * split. The two operations cannot both cause a split since they
	 * operate on the same index in the bmap btree, so we only need a
	 * reservation for one bmbt split if either thing is happening.
	 * However, we haven't locked the inode yet, so we reserve assuming
	 * this is the case.
	 *
	 * The first allocation call tries to reserve enough space to handle
	 * mapping dmap into a sparse part of the file plus the bmbt split. We
	 * haven't locked the inode or read the existing mapping yet, so we do
	 * not know for sure that we need the space. This should succeed most
	 * of the time.
	 *
	 * If the first attempt fails, try again but reserving only enough
	 * space to handle a bmbt split. This is the hard minimum requirement,
	 * and we revisit quota reservations later when we know more about what
	 * we're remapping.
	 */
	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
			resblks + dmap->br_blockcount, 0, false, &tp);
	if (error == -EDQUOT || error == -ENOSPC) {
		quota_reserved = false;
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				resblks, 0, false, &tp);
	}
	if (error)
		goto out;

	/*
	 * Read what's currently mapped in the destination file into smap.
	 * If smap isn't a hole, we will have to remove it before we can add
	 * dmap to the destination file.
	 */
	nimaps = 1;
	error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount,
			&smap, &nimaps, 0);
	if (error)
		goto out_cancel;
	ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff);
	smap_real = xfs_bmap_is_real_extent(&smap);

	/*
	 * We can only remap as many blocks as the smaller of the two extent
	 * maps, because we can only remap one extent at a time.
	 */
	dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount);
	ASSERT(dmap->br_blockcount == smap.br_blockcount);

	trace_xfs_reflink_remap_extent_dest(ip, &smap);

	/*
	 * Two extents mapped to the same physical block must not have
	 * different states; that's filesystem corruption. Move on to the next
	 * extent if they're both holes or both the same physical extent.
	 */
	if (dmap->br_startblock == smap.br_startblock) {
		if (dmap->br_state != smap.br_state)
			error = -EFSCORRUPTED;
		goto out_cancel;
	}

	/* If both extents are unwritten, leave them alone. */
	if (dmap->br_state == XFS_EXT_UNWRITTEN &&
	    smap.br_state == XFS_EXT_UNWRITTEN)
		goto out_cancel;

	/* No reflinking if the AG of the dest mapping is low on space. */
	if (dmap_written) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, dmap->br_startblock));
		if (error)
			goto out_cancel;
	}

	/*
	 * Increase quota reservation if we think the quota block counter for
	 * this file could increase.
	 *
	 * If we are mapping a written extent into the file, we need to have
	 * enough quota block count reservation to handle the blocks in that
	 * extent. We log only the delta to the quota block counts, so if the
	 * extent we're unmapping also has blocks allocated to it, we don't
	 * need a quota reservation for the extent itself.
	 *
	 * Note that if we're replacing a delalloc reservation with a written
	 * extent, we have to take the full quota reservation because removing
	 * the delalloc reservation gives the block count back to the quota
	 * count. This is suboptimal, but the VFS flushed the dest range
	 * before we started. That should have removed all the delalloc
	 * reservations, but we code defensively.
	 *
	 * xfs_trans_alloc_inode above already tried to grab an even larger
	 * quota reservation, and kicked off a blockgc scan if it couldn't.
	 * If we can't get a potentially smaller quota reservation now, we're
	 * done.
	 */
	if (!quota_reserved && !smap_real && dmap_written) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				dmap->br_blockcount, 0, false);
		if (error)
			goto out_cancel;
	}

	if (smap_real)
		++iext_delta;

	if (dmap_written)
		++iext_delta;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, iext_delta);
	if (error)
		goto out_cancel;

	if (smap_real) {
		/*
		 * If the extent we're unmapping is backed by storage (written
		 * or not), unmap the extent and drop its refcount.
		 */
		xfs_bmap_unmap_extent(tp, ip, &smap);
		xfs_refcount_decrease_extent(tp, &smap);
		qdelta -= smap.br_blockcount;
	} else if (smap.br_startblock == DELAYSTARTBLOCK) {
		xfs_filblks_t	len = smap.br_blockcount;

		/*
		 * If the extent we're unmapping is a delalloc reservation,
		 * we can use the regular bunmapi function to release the
		 * incore state. Dropping the delalloc reservation takes care
		 * of the quota reservation for us.
		 */
		error = __xfs_bunmapi(NULL, ip, smap.br_startoff, &len, 0, 1);
		if (error)
			goto out_cancel;
		ASSERT(len == 0);
	}

	/*
	 * If the extent we're sharing is backed by written storage, increase
	 * its refcount and map it into the file.
	 */
	if (dmap_written) {
		xfs_refcount_increase_extent(tp, dmap);
		xfs_bmap_map_extent(tp, ip, dmap);
		qdelta += dmap->br_blockcount;
	}

	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta);

	/* Update dest isize if needed. */
	newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount);
	newlen = min_t(xfs_off_t, newlen, new_isize);
	if (newlen > i_size_read(VFS_I(ip))) {
		trace_xfs_reflink_update_inode_size(ip, newlen);
		i_size_write(VFS_I(ip), newlen);
		ip->i_disk_size = newlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	}

	/* Commit everything and unlock. */
	error = xfs_trans_commit(tp);
	goto out_unlock;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	if (error)
		trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
	return error;
}

/* Remap a range of one file to the other. */
int
xfs_reflink_remap_blocks(
	struct xfs_inode	*src,
	loff_t			pos_in,
	struct xfs_inode	*dest,
	loff_t			pos_out,
	loff_t			remap_len,
	loff_t			*remapped)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = src->i_mount;
	xfs_fileoff_t		srcoff = XFS_B_TO_FSBT(mp, pos_in);
	xfs_fileoff_t		destoff = XFS_B_TO_FSBT(mp, pos_out);
	xfs_filblks_t		len;
	xfs_filblks_t		remapped_len = 0;
	xfs_off_t		new_isize = pos_out + remap_len;
	int			nimaps;
	int			error = 0;

	len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len),
			XFS_MAX_FILEOFF);

	trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff);

	while (len > 0) {
		unsigned int	lock_mode;

		/* Read extent from the source file */
		nimaps = 1;
		lock_mode = xfs_ilock_data_map_shared(src);
		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
		xfs_iunlock(src, lock_mode);
		if (error)
			break;
		/*
		 * The caller supposedly flushed all dirty pages in the source
		 * file range, which means that writeback should have allocated
		 * or deleted all delalloc reservations in that range. If we
		 * find one, that's a good sign that something is seriously
		 * wrong here.
		 */
		ASSERT(nimaps == 1 && imap.br_startoff == srcoff);
		if (imap.br_startblock == DELAYSTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			error = -EFSCORRUPTED;
			break;
		}

		trace_xfs_reflink_remap_extent_src(src, &imap);

		/* Remap into the destination file at the given offset. */
		imap.br_startoff = destoff;
		error = xfs_reflink_remap_extent(dest, &imap, new_isize);
		if (error)
			break;

		if (fatal_signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Advance drange/srange */
		srcoff += imap.br_blockcount;
		destoff += imap.br_blockcount;
		len -= imap.br_blockcount;
		remapped_len += imap.br_blockcount;
	}

	if (error)
		trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
	*remapped = min_t(loff_t, remap_len,
			XFS_FSB_TO_B(src->i_mount, remapped_len));
	return error;
}

/*
 * If we're reflinking to a point past the destination file's EOF, we must
 * zero any speculative post-EOF preallocations that sit between the old EOF
 * and the destination file offset.
 */
static int
xfs_reflink_zero_posteof(
	struct xfs_inode	*ip,
	loff_t			pos)
{
	loff_t			isize = i_size_read(VFS_I(ip));

	if (pos <= isize)
		return 0;

	trace_xfs_zero_eof(ip, isize, pos - isize);
	return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
			&xfs_buffered_write_iomap_ops);
}

/*
 * Prepare two files for range cloning. Upon a successful return both inodes
 * will have the iolock and mmaplock held, the page cache of the out file will
 * be truncated, and any leases on the out file will have been broken. This
 * function borrows heavily from xfs_file_aio_write_checks.
 *
 * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
 * checked that the bytes beyond EOF physically match. Hence we cannot use the
 * EOF block in the source dedupe range because it's not a complete block match,
 * and so it can introduce corruption into the file that has its block replaced.
 *
 * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
 * "block aligned" for the purposes of cloning entire files. However, if the
 * source file range includes the EOF block and it lands within the existing EOF
 * of the destination file, then we can expose stale data from beyond the source
 * file EOF in the destination file.
 *
 * XFS doesn't support partial block sharing, so we have to check for both of
 * these cases ourselves. For dedupe, we can simply round the length to dedupe
 * down to the previous whole block and ignore the partial EOF block. While this
 * means we can't dedupe the last block of a file, this is an acceptable
 * tradeoff for simplicity of implementation.
 *
 * For cloning, we want to share the partial EOF block if it is also the new EOF
 * block of the destination file. If the partial EOF block lies inside the
 * existing destination EOF, then we have to abort the clone to avoid exposing
 * stale data in the destination file. Hence we reject these clone attempts with
 * -EINVAL in this case.
 */
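/*
 * For example, with 4096-byte blocks and a 6000-byte source file: a dedupe
 * request covering the whole file is rounded down to 4096 bytes, leaving the
 * partial EOF block alone; cloning the whole file to offset 0 of an empty
 * destination is allowed because the partial block becomes the destination's
 * new EOF block, but cloning it into the middle of a larger destination file
 * is rejected with -EINVAL.
 */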
int
xfs_reflink_remap_prep(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			*len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	int			ret;

	/* Lock both files against IO */
	ret = xfs_ilock2_io_mmap(src, dest);
	if (ret)
		return ret;

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	/* Don't reflink realtime inodes */
	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
		goto out_unlock;

	/* Don't share DAX file data for now. */
	if (IS_DAX(inode_in) || IS_DAX(inode_out))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
			len, remap_flags);
	if (ret || *len == 0)
		goto out_unlock;

	/* Attach dquots to dest inode before changing block map */
	ret = xfs_qm_dqattach(dest);
	if (ret)
		goto out_unlock;

	/*
	 * Zero existing post-eof speculative preallocations in the destination
	 * file.
	 */
	ret = xfs_reflink_zero_posteof(dest, pos_out);
	if (ret)
		goto out_unlock;

	/* Set flags and remap blocks. */
	ret = xfs_reflink_set_inode_flag(src, dest);
	if (ret)
		goto out_unlock;

	/*
	 * If pos_out > EOF, we may have dirtied blocks between EOF and
	 * pos_out. In that case, we need to extend the flush and unmap to cover
	 * from EOF to the end of the copy length.
	 */
	if (pos_out > XFS_ISIZE(dest)) {
		loff_t	flen = *len + (pos_out - XFS_ISIZE(dest));
		ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
	} else {
		ret = xfs_flush_unmap_range(dest, pos_out, *len);
	}
	if (ret)
		goto out_unlock;

	return 0;
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	return ret;
}

/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	bool			*has_shared)
{
	struct xfs_bmbt_irec	got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		rbno;
	xfs_extlen_t		rlen;
	struct xfs_iext_cursor	icur;
	bool			found;
	int			error;

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
	if (error)
		return error;

	*has_shared = false;
	found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
	while (found) {
		if (isnullstartblock(got.br_startblock) ||
		    got.br_state != XFS_EXT_NORM)
			goto next;
		agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
		agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
		aglen = got.br_blockcount;

		error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
				&rbno, &rlen, false);
		if (error)
			return error;
		/* Is there still a shared block here? */
		if (rbno != NULLAGBLOCK) {
			*has_shared = true;
			return 0;
		}
next:
		found = xfs_iext_next_extent(ifp, &icur, &got);
	}

	return 0;
}

/*
 * Clear the inode reflink flag if there are no shared extents.
 *
 * The caller is responsible for joining the inode to the transaction passed in.
 * The inode will be joined to the transaction that is returned to the caller.
 */
int
xfs_reflink_clear_inode_flag(
	struct xfs_inode	*ip,
	struct xfs_trans	**tpp)
{
	bool			needs_flag;
	int			error = 0;

	ASSERT(xfs_is_reflink_inode(ip));

	error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
	if (error || needs_flag)
		return error;

	/*
	 * We didn't find any shared blocks so turn off the reflink flag.
	 * First, get rid of any leftover CoW mappings.
	 */
	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
			true);
	if (error)
		return error;

	/* Clear the inode flag. */
	trace_xfs_reflink_unset_inode_flag(ip);
	ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	xfs_inode_clear_cowblocks_tag(ip);
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	return error;
}

/*
 * Clear the inode reflink flag if there are no shared extents and the size
 * hasn't changed.
 */
STATIC int
xfs_reflink_try_clear_inode_flag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_reflink_clear_inode_flag(ip, &tp);
	if (error)
		goto cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
cancel:
	xfs_trans_cancel(tp);
out:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pre-COW all shared blocks within a given byte range of a file and turn off
 * the reflink flag if we unshare all of the file's blocks.
 */
int
xfs_reflink_unshare(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct inode		*inode = VFS_I(ip);
	int			error;

	if (!xfs_is_reflink_inode(ip))
		return 0;

	trace_xfs_reflink_unshare(ip, offset, len);

	inode_dio_wait(inode);

	error = iomap_file_unshare(inode, offset, len,
			&xfs_buffered_write_iomap_ops);
	if (error)
		goto out;

	error = filemap_write_and_wait_range(inode->i_mapping, offset,
			offset + len - 1);
	if (error)
		goto out;

	/* Turn off the reflink flag if possible. */
	error = xfs_reflink_try_clear_inode_flag(ip);
	if (error)
		goto out;
	return 0;

out:
	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
	return error;
}