fs/xfs/xfs_rmap_item.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"

kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

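/*
 * Free an RUI log item.  Items tracking more than XFS_RUI_MAX_FAST_EXTENTS
 * extents were allocated from the heap in xfs_rui_init(); everything else
 * goes back to the rui zone.
 */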
void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_zone_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

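/*
 * Return the number of log iovecs and the number of bytes needed to log the
 * given rui item.
 */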
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

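/*
 * This is the ops vector shared by all rui log items.
 */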
static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
};

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP);
	else
		ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len)
		return -EFSCORRUPTED;

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

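/*
 * Return the number of log iovecs and the number of bytes needed to log the
 * given rud item.
 */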
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_zone_free(xfs_rud_zone, rudp);
}

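/*
 * This is the ops vector shared by all rud log items.
 */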
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

/*
 * Allocate and initialize an rud item to mark the completion of the
 * given rui item.
 */
struct xfs_rud_log_item *
xfs_rud_init(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
	xfs_log_item_init(mp, &rudp->rud_item, XFS_LI_RUD, &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return rudp;
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
int
xfs_rui_recover(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	int				i;
	int				error = 0;
	struct xfs_map_extent		*rmap;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_rud_log_item		*rudp;
	enum xfs_rmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;

	ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
			/*
			 * This will pull the RUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
			xfs_rui_release(ruip);
			return -EIO;
		}
	}

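	/*
	 * Replay the rmap updates in a single transaction, logging an RUD to
	 * mark the recovered RUI as finished.
	 */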
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

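	/* Finish each rmap update logged in the intent item. */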
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}