Commit | Line | Data |
---|---|---|
0b61f8a4 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
5880f2d7 DW |
2 | /* |
3 | * Copyright (C) 2016 Oracle. All Rights Reserved. | |
5880f2d7 | 4 | * Author: Darrick J. Wong <darrick.wong@oracle.com> |
5880f2d7 DW |
5 | */ |
6 | #include "xfs.h" | |
7 | #include "xfs_fs.h" | |
8 | #include "xfs_format.h" | |
9 | #include "xfs_log_format.h" | |
10 | #include "xfs_trans_resv.h" | |
9e88b5d8 | 11 | #include "xfs_bit.h" |
b31c2bdc | 12 | #include "xfs_shared.h" |
5880f2d7 | 13 | #include "xfs_mount.h" |
9c194644 | 14 | #include "xfs_defer.h" |
5880f2d7 DW |
15 | #include "xfs_trans.h" |
16 | #include "xfs_trans_priv.h" | |
5880f2d7 DW |
17 | #include "xfs_rmap_item.h" |
18 | #include "xfs_log.h" | |
9c194644 | 19 | #include "xfs_rmap.h" |
a5155b87 | 20 | #include "xfs_error.h" |
07590a9d | 21 | #include "xfs_log_priv.h" |
86ffa471 | 22 | #include "xfs_log_recover.h" |
5880f2d7 DW |
23 | |
/* Slab caches for the rmap update intent (RUI) and done (RUD) log items. */
kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;
26 | ||
/* Convert a generic log item pointer back to its containing RUI. */
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}
31 | ||
07590a9d | 32 | STATIC void |
5880f2d7 DW |
33 | xfs_rui_item_free( |
34 | struct xfs_rui_log_item *ruip) | |
35 | { | |
36 | if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS) | |
37 | kmem_free(ruip); | |
38 | else | |
377bcd5f | 39 | kmem_cache_free(xfs_rui_zone, ruip); |
5880f2d7 DW |
40 | } |
41 | ||
/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		/* Last reference: pull it from the AIL (if present) and free. */
		xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}
59 | ||
5880f2d7 DW |
60 | STATIC void |
61 | xfs_rui_item_size( | |
62 | struct xfs_log_item *lip, | |
63 | int *nvecs, | |
64 | int *nbytes) | |
65 | { | |
cd00158c DW |
66 | struct xfs_rui_log_item *ruip = RUI_ITEM(lip); |
67 | ||
5880f2d7 | 68 | *nvecs += 1; |
cd00158c | 69 | *nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents); |
5880f2d7 DW |
70 | } |
71 | ||
/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Every extent slot must have been filled before formatting. */
	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}
96 | ||
/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}
114 | ||
5880f2d7 DW |
115 | /* |
116 | * The RUI has been either committed or aborted if the transaction has been | |
117 | * cancelled. If the transaction was cancelled, an RUD isn't going to be | |
118 | * constructed and thus we free the RUI here directly. | |
119 | */ | |
120 | STATIC void | |
ddf92053 | 121 | xfs_rui_item_release( |
5880f2d7 DW |
122 | struct xfs_log_item *lip) |
123 | { | |
ddf92053 | 124 | xfs_rui_release(RUI_ITEM(lip)); |
5880f2d7 DW |
125 | } |
126 | ||
/* Log item operations vector for RUI (rmap update intent) items. */
static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
};
133 | ||
/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)

{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	/* Large extent counts fall back to a heap allocation. */
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_zone_zalloc(xfs_rui_zone, 0);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	/* One reference for the intent, one for the matching done item. */
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}
159 | ||
160 | /* | |
161 | * Copy an RUI format buffer from the given buf, and into the destination | |
162 | * RUI format structure. The RUI/RUD items were designed not to need any | |
163 | * special alignment handling. | |
164 | */ | |
07590a9d | 165 | STATIC int |
5880f2d7 DW |
166 | xfs_rui_copy_format( |
167 | struct xfs_log_iovec *buf, | |
168 | struct xfs_rui_log_format *dst_rui_fmt) | |
169 | { | |
170 | struct xfs_rui_log_format *src_rui_fmt; | |
171 | uint len; | |
172 | ||
173 | src_rui_fmt = buf->i_addr; | |
cd00158c | 174 | len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents); |
5880f2d7 | 175 | |
a5155b87 DW |
176 | if (buf->i_len != len) { |
177 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); | |
5880f2d7 | 178 | return -EFSCORRUPTED; |
a5155b87 | 179 | } |
5880f2d7 | 180 | |
cd00158c | 181 | memcpy(dst_rui_fmt, src_rui_fmt, len); |
5880f2d7 DW |
182 | return 0; |
183 | } | |
184 | ||
5880f2d7 DW |
185 | static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip) |
186 | { | |
187 | return container_of(lip, struct xfs_rud_log_item, rud_item); | |
188 | } | |
189 | ||
5880f2d7 DW |
190 | STATIC void |
191 | xfs_rud_item_size( | |
192 | struct xfs_log_item *lip, | |
193 | int *nvecs, | |
194 | int *nbytes) | |
195 | { | |
196 | *nvecs += 1; | |
722e2517 | 197 | *nbytes += sizeof(struct xfs_rud_log_format); |
5880f2d7 DW |
198 | } |
199 | ||
/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * It is at this point that we assert that all of the extent
 * slots in the rud item have been filled.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}
221 | ||
/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	/* Drop the RUD's reference on the paired RUI, then free the RUD. */
	xfs_rui_release(rudp->rud_ruip);
	kmem_cache_free(xfs_rud_zone, rudp);
}
236 | ||
/* Log item operations vector for RUD (rmap update done) items. */
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};
243 | ||
/*
 * Allocate an RUD, bind it to the given RUI via the intent id, and join it
 * to the transaction.
 */
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_zone_zalloc(xfs_rud_zone, 0);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	/* Record the intent's id so log recovery can match RUD to RUI. */
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}
9e88b5d8 | 260 | |
/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	/* Translate the in-core intent type to the on-disk flag encoding. */
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}
303 | ||
/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}
339 | ||
/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	/* list_sort() comparator: only the sign of the result matters. */
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}
356 | ||
/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*rmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	/* Fill the next free slot in the format's extent array. */
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}
385 | ||
/*
 * Create an RUI covering all of the rmap intents on @items, optionally
 * sorting them by AG first, and join it to the transaction.
 */
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}
406 | ||
3cfce1e3 | 407 | /* Get an RUD so we can process all the deferred rmap updates. */ |
f09d167c | 408 | static struct xfs_log_item * |
3cfce1e3 CH |
409 | xfs_rmap_update_create_done( |
410 | struct xfs_trans *tp, | |
13a83333 | 411 | struct xfs_log_item *intent, |
3cfce1e3 CH |
412 | unsigned int count) |
413 | { | |
f09d167c | 414 | return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item; |
3cfce1e3 CH |
415 | } |
416 | ||
/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
			rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
			state);
	/* The intent is consumed whether or not the update succeeded. */
	kmem_free(rmap);
	return error;
}
437 | ||
3cfce1e3 CH |
438 | /* Abort all pending RUIs. */ |
439 | STATIC void | |
440 | xfs_rmap_update_abort_intent( | |
13a83333 | 441 | struct xfs_log_item *intent) |
3cfce1e3 | 442 | { |
13a83333 | 443 | xfs_rui_release(RUI_ITEM(intent)); |
3cfce1e3 CH |
444 | } |
445 | ||
446 | /* Cancel a deferred rmap update. */ | |
447 | STATIC void | |
448 | xfs_rmap_update_cancel_item( | |
449 | struct list_head *item) | |
450 | { | |
451 | struct xfs_rmap_intent *rmap; | |
452 | ||
453 | rmap = container_of(item, struct xfs_rmap_intent, ri_list); | |
454 | kmem_free(rmap); | |
455 | } | |
456 | ||
/* Deferred-operation dispatch table for rmap updates. */
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup	= xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};
466 | ||
/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
int
xfs_rui_recover(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	int				i;
	int				error = 0;
	struct xfs_map_extent		*rmap;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_rud_log_item		*rudp;
	enum xfs_rmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;

	ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));

	/*
	 * First check the validity of the extents described by the
	 * RUI. If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
			/*
			 * This will pull the RUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
			xfs_rui_release(ruip);
			return -EFSCORRUPTED;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	/* Replay each recovered extent against the rmap btree. */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		/* Decode the on-disk flag back into an intent type. */
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;

	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}
86ffa471 | 590 | |
07590a9d DW |
591 | /* |
592 | * This routine is called to create an in-core extent rmap update | |
593 | * item from the rui format structure which was logged on disk. | |
594 | * It allocates an in-core rui, copies the extents from the format | |
595 | * structure into it, and adds the rui to the AIL with the given | |
596 | * LSN. | |
597 | */ | |
598 | STATIC int | |
599 | xlog_recover_rui_commit_pass2( | |
600 | struct xlog *log, | |
601 | struct list_head *buffer_list, | |
602 | struct xlog_recover_item *item, | |
603 | xfs_lsn_t lsn) | |
604 | { | |
605 | int error; | |
606 | struct xfs_mount *mp = log->l_mp; | |
607 | struct xfs_rui_log_item *ruip; | |
608 | struct xfs_rui_log_format *rui_formatp; | |
609 | ||
610 | rui_formatp = item->ri_buf[0].i_addr; | |
611 | ||
612 | ruip = xfs_rui_init(mp, rui_formatp->rui_nextents); | |
613 | error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format); | |
614 | if (error) { | |
615 | xfs_rui_item_free(ruip); | |
616 | return error; | |
617 | } | |
618 | atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents); | |
619 | ||
620 | spin_lock(&log->l_ailp->ail_lock); | |
621 | /* | |
622 | * The RUI has two references. One for the RUD and one for RUI to ensure | |
623 | * it makes it into the AIL. Insert the RUI into the AIL directly and | |
624 | * drop the RUI reference. Note that xfs_trans_ail_update() drops the | |
625 | * AIL lock. | |
626 | */ | |
627 | xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn); | |
628 | xfs_rui_release(ruip); | |
629 | return 0; | |
630 | } | |
631 | ||
/* Log recovery dispatch entry for RUI items. */
const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};
636 | ||
/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;
	struct xfs_rui_log_item		*ruip = NULL;
	struct xfs_log_item		*lip;
	uint64_t			rui_id;
	struct xfs_ail_cursor		cur;
	struct xfs_ail			*ailp = log->l_ailp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
	rui_id = rud_formatp->rud_rui_id;

	/*
	 * Search for the RUI with the id in the RUD format structure in the
	 * AIL.
	 */
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_RUI) {
			ruip = (struct xfs_rui_log_item *)lip;
			if (ruip->rui_format.rui_id == rui_id) {
				/*
				 * Drop the RUD reference to the RUI. This
				 * removes the RUI from the AIL and frees it.
				 * Drop the AIL lock around the release since
				 * it may take the lock itself.
				 */
				spin_unlock(&ailp->ail_lock);
				xfs_rui_release(ruip);
				spin_lock(&ailp->ail_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	return 0;
}
690 | ||
/* Log recovery dispatch entry for RUD items. */
const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};