/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
35 * This routine is called to allocate an "rmap update intent"
36 * log item that will hold nextents worth of extents. The
37 * caller must use all nextents extents, because we are not
38 * flexible about this at all.
40 STATIC struct xfs_rui_log_item *
45 struct xfs_rui_log_item *ruip;
50 ruip = xfs_rui_init(tp->t_mountp, nextents);
54 * Get a log_item_desc to point at the new item.
56 xfs_trans_add_item(tp, &ruip->rui_item);
60 /* Set the map extent flags for this reverse mapping. */
62 xfs_trans_set_rmap_flags(
63 struct xfs_map_extent *rmap,
64 enum xfs_rmap_intent_type type,
69 if (state == XFS_EXT_UNWRITTEN)
70 rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
71 if (whichfork == XFS_ATTR_FORK)
72 rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
75 rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
78 rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
80 case XFS_RMAP_CONVERT:
81 rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
84 rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
87 rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
95 * This routine is called to indicate that the described reverse
96 * mapping is to be logged as needing to be updated. It should be
97 * called once for each mapping.
100 xfs_trans_log_start_rmap_update(
101 struct xfs_trans *tp,
102 struct xfs_rui_log_item *ruip,
103 enum xfs_rmap_intent_type type,
106 xfs_fileoff_t startoff,
107 xfs_fsblock_t startblock,
108 xfs_filblks_t blockcount,
112 struct xfs_map_extent *rmap;
114 tp->t_flags |= XFS_TRANS_DIRTY;
115 ruip->rui_item.li_desc->lid_flags |= XFS_LID_DIRTY;
118 * atomic_inc_return gives us the value after the increment;
119 * we want to use it as an array index so we need to subtract 1 from
122 next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
123 ASSERT(next_extent < ruip->rui_format.rui_nextents);
124 rmap = &(ruip->rui_format.rui_extents[next_extent]);
125 rmap->me_owner = owner;
126 rmap->me_startblock = startblock;
127 rmap->me_startoff = startoff;
128 rmap->me_len = blockcount;
129 xfs_trans_set_rmap_flags(rmap, type, whichfork, state);
133 * This routine is called to allocate an "rmap update done"
134 * log item that will hold nextents worth of extents. The
135 * caller must use all nextents extents, because we are not
136 * flexible about this at all.
138 struct xfs_rud_log_item *
140 struct xfs_trans *tp,
141 struct xfs_rui_log_item *ruip,
144 struct xfs_rud_log_item *rudp;
147 ASSERT(nextents > 0);
149 rudp = xfs_rud_init(tp->t_mountp, ruip, nextents);
150 ASSERT(rudp != NULL);
153 * Get a log_item_desc to point at the new item.
155 xfs_trans_add_item(tp, &rudp->rud_item);
160 * Finish an rmap update and log it to the RUD. Note that the transaction is
161 * marked dirty regardless of whether the rmap update succeeds or fails to
162 * support the RUI/RUD lifecycle rules.
165 xfs_trans_log_finish_rmap_update(
166 struct xfs_trans *tp,
167 struct xfs_rud_log_item *rudp,
168 enum xfs_rmap_intent_type type,
171 xfs_fileoff_t startoff,
172 xfs_fsblock_t startblock,
173 xfs_filblks_t blockcount,
177 struct xfs_map_extent *rmap;
180 /* XXX: actually finish the rmap update here */
181 error = -EFSCORRUPTED;
184 * Mark the transaction dirty, even on error. This ensures the
185 * transaction is aborted, which:
187 * 1.) releases the RUI and frees the RUD
188 * 2.) shuts down the filesystem
190 tp->t_flags |= XFS_TRANS_DIRTY;
191 rudp->rud_item.li_desc->lid_flags |= XFS_LID_DIRTY;
193 next_extent = rudp->rud_next_extent;
194 ASSERT(next_extent < rudp->rud_format.rud_nextents);
195 rmap = &(rudp->rud_format.rud_extents[next_extent]);
196 rmap->me_owner = owner;
197 rmap->me_startblock = startblock;
198 rmap->me_startoff = startoff;
199 rmap->me_len = blockcount;
200 xfs_trans_set_rmap_flags(rmap, type, whichfork, state);
201 rudp->rud_next_extent++;
206 /* Sort rmap intents by AG. */
208 xfs_rmap_update_diff_items(
213 struct xfs_mount *mp = priv;
214 struct xfs_rmap_intent *ra;
215 struct xfs_rmap_intent *rb;
217 ra = container_of(a, struct xfs_rmap_intent, ri_list);
218 rb = container_of(b, struct xfs_rmap_intent, ri_list);
219 return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
220 XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
225 xfs_rmap_update_create_intent(
226 struct xfs_trans *tp,
229 return xfs_trans_get_rui(tp, count);
232 /* Log rmap updates in the intent item. */
234 xfs_rmap_update_log_item(
235 struct xfs_trans *tp,
237 struct list_head *item)
239 struct xfs_rmap_intent *rmap;
241 rmap = container_of(item, struct xfs_rmap_intent, ri_list);
242 xfs_trans_log_start_rmap_update(tp, intent, rmap->ri_type,
243 rmap->ri_owner, rmap->ri_whichfork,
244 rmap->ri_bmap.br_startoff,
245 rmap->ri_bmap.br_startblock,
246 rmap->ri_bmap.br_blockcount,
247 rmap->ri_bmap.br_state);
250 /* Get an RUD so we can process all the deferred rmap updates. */
252 xfs_rmap_update_create_done(
253 struct xfs_trans *tp,
257 return xfs_trans_get_rud(tp, intent, count);
260 /* Process a deferred rmap update. */
262 xfs_rmap_update_finish_item(
263 struct xfs_trans *tp,
264 struct xfs_defer_ops *dop,
265 struct list_head *item,
269 struct xfs_rmap_intent *rmap;
272 rmap = container_of(item, struct xfs_rmap_intent, ri_list);
273 error = xfs_trans_log_finish_rmap_update(tp, done_item,
275 rmap->ri_owner, rmap->ri_whichfork,
276 rmap->ri_bmap.br_startoff,
277 rmap->ri_bmap.br_startblock,
278 rmap->ri_bmap.br_blockcount,
279 rmap->ri_bmap.br_state);
284 /* Clean up after processing deferred rmaps. */
286 xfs_rmap_update_finish_cleanup(
287 struct xfs_trans *tp,
293 /* Abort all pending RUIs. */
295 xfs_rmap_update_abort_intent(
298 xfs_rui_release(intent);
301 /* Cancel a deferred rmap update. */
303 xfs_rmap_update_cancel_item(
304 struct list_head *item)
306 struct xfs_rmap_intent *rmap;
308 rmap = container_of(item, struct xfs_rmap_intent, ri_list);
312 static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
313 .type = XFS_DEFER_OPS_TYPE_RMAP,
314 .max_items = XFS_RUI_MAX_FAST_EXTENTS,
315 .diff_items = xfs_rmap_update_diff_items,
316 .create_intent = xfs_rmap_update_create_intent,
317 .abort_intent = xfs_rmap_update_abort_intent,
318 .log_item = xfs_rmap_update_log_item,
319 .create_done = xfs_rmap_update_create_done,
320 .finish_item = xfs_rmap_update_finish_item,
321 .finish_cleanup = xfs_rmap_update_finish_cleanup,
322 .cancel_item = xfs_rmap_update_cancel_item,
325 /* Register the deferred op type. */
327 xfs_rmap_update_init_defer_op(void)
329 xfs_defer_init_op_type(&xfs_rmap_update_defer_type);