// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;
/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}
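/*
 * Illustration of the stealing scheme above (not part of the original
 * source): because t_curr_res starts at zero, the first transaction to
 * commit into this context finds ctx->ticket->t_curr_res == 0 in
 * xlog_cil_insert_items() and donates the ticket's entire unit reservation
 * from its own ticket, so the fixed checkpoint overhead is paid exactly
 * once per CIL context.
 */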

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}
static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
			niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
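/*
 * Worked example (structure sizes are arch and config dependent, so the
 * numbers are illustrative only): if sizeof(struct xfs_log_vec) were 56
 * bytes and sizeof(struct xfs_log_iovec) 16 bytes, then for niovecs = 3
 * this returns round_up(56 + 3 * 16, 8) = 104 bytes, and the 64-bit
 * aligned data region of the shadow buffer starts that far into the
 * allocation. Only the round_up() to sizeof(uint64_t) is what guarantees
 * the alignment of that region.
 */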
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 * a) it does not exist; or
 * b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the size
 * of the incoming modification. Then during the formatting of the item we can
 * swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			/*
			 * We are in transaction context, which means this
			 * allocation will pick up GFP_NOFS from the
			 * memalloc_nofs_save/restore context the transaction
			 * holds. This means we can use GFP_KERNEL here so the
			 * generic kvmalloc() code will run vmalloc on
			 * contiguous page allocation failure as we require.
			 */
			lv = kvmalloc(buf_size, GFP_KERNEL);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}
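/*
 * Call-order sketch (mirroring xlog_cil_commit() below): shadow buffers are
 * sized and allocated before the CIL context lock is taken, then formatting
 * happens with the lock held in read mode:
 *
 *	xlog_cil_alloc_shadow_bufs(log, tp);	// may sleep, no CIL locks held
 *	down_read(&cil->xc_ctx_lock);		// lock out background push
 *	xlog_cil_insert_items(log, tp);		// format into lv/shadow bufs
 *
 * which is what keeps memory allocation out of the xc_ctx_lock critical
 * section described above.
 */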
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* account for space used by new iovec headers */
	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
	len += iovhdr_res;
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
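/*
 * Worked example for the split_res calculation above, assuming 32 KiB
 * iclogs with a 512 byte header (both are tunable, so the numbers are
 * illustrative only): iclog_space = 32768 - 512 = 32256 bytes. If
 * ctx->space_used is 30000 and we insert len = 5000, we cross one iclog
 * boundary (30000 / 32256 == 0 while 35000 / 32256 == 1), so
 * split_res = (5000 + 32255) / 32256 = 1 extra record header, charged
 * against the tickets as 512 + sizeof(struct xlog_op_header) bytes.
 */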
static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}
static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx *ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount *mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx *ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(xfs_has_discard(mp));

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
				 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}
/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}
void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}
/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the tail beyond the grant write head.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now. This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}
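/*
 * Note: this function is called up to twice per checkpoint - once when
 * xlog_write() is granted iclog space for the start record (recording
 * ctx->start_lsn and waking xc_start_wait waiters), and once for the
 * commit record (recording ctx->commit_lsn and waking xc_commit_wait
 * waiters).
 */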
/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait,
					  &cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait,
					  &cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
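/*
 * For illustration: if the checkpoint for sequence 5 is pushing and
 * sequence 4 has not yet recorded a start LSN, the sequence 5 pusher
 * sleeps on xc_start_wait above until xlog_cil_set_ctx_write_state()
 * records sequence 4's start LSN and wakes it, guaranteeing that start
 * (and likewise commit) records land in the log in ascending sequence
 * order.
 */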
/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx	*ctx,
	struct xfs_log_vec	*chain)
{
	struct xlog		*log = ctx->cil->xc_log;
	int			error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, chain, ctx->ticket, XLOG_START_TRANS);
}
/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx	*ctx)
{
	struct xlog		*log = ctx->cil->xc_log;
	struct xfs_log_iovec	reg = {
		.i_addr = NULL,
		.i_len = 0,
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec	vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int			error;

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS);
	if (error)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
	return error;
}
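/*
 * Worth noting about the function above: the commit record is deliberately
 * empty - reg has i_addr = NULL and i_len = 0 - so it carries no payload.
 * It serves purely as the ordered marker that closes off the checkpoint in
 * the log.
 */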
/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		preflush_tail_lsn;
	xfs_csn_t		push_seq;
	struct bio		bio;
	DECLARE_COMPLETION_ONSTACK(bdev_flush);
	bool			push_commit_stable;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The ctx->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * The CIL is stable at this point - nothing new will be added to it
	 * because we hold the flush lock exclusively. Hence we can now issue
	 * a cache flush to ensure all the completed metadata in the journal we
	 * are about to overwrite is on stable storage.
	 *
	 * Because we are issuing this cache flush before we've written the
	 * tail lsn to the iclog, we can have metadata IO completions move the
	 * tail forwards between the completion of this flush and the iclog
	 * being written. In this case, we need to re-issue the cache flush
	 * before the iclog write. To detect whether the log tail moves, sample
	 * the tail LSN *before* we issue the flush.
	 */
	preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
	xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
				&bdev_flush);

	/*
	 * Pull all the log vectors off the items in the CIL, and remove the
	 * items from the CIL. We don't need the CIL lock here because it's only
	 * needed on the transaction commit side which is currently locked out
	 * by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item *item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	/*
	 * Before we format and submit the first iclog, we have to ensure that
	 * the metadata writeback ordering cache flush is complete.
	 */
	wait_for_completion(&bdev_flush);

	error = xlog_cil_write_chain(ctx, &lvhdr);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	xfs_log_ticket_ungrant(log, tic);

	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;

out_abort_free_ticket:
	xfs_log_ticket_ungrant(log, tic);
	ASSERT(xlog_is_shutdown(log));
	if (!ctx->commit_iclog) {
		xlog_cil_committed(ctx);
		return;
	}
	spin_lock(&log->l_icloglock);
	xlog_state_release_iclog(log, ctx->commit_iclog, 0);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
}
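/*
 * In summary, the push above proceeds in phases: issue an async cache
 * flush and detach the lv chain from the CIL items, switch to a new
 * context so transaction commits can continue, wait for the flush, write
 * the chain with its start record, write the commit record, and finally
 * release the commit iclog with the NEED_FLUSH/NEED_FUA flags set so
 * journal IO is ordered against metadata writeback on stable storage.
 */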

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * Don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The ctx->xc_push_lock provides the serialisation necessary for safely
	 * using the lockless waitqueue_active() check in this context.
	 */
	if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
	    waitqueue_active(&cil->xc_push_wait)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(cil->xc_ctx->space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}
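/*
 * Two distinct thresholds are at work above: XLOG_CIL_SPACE_LIMIT() is the
 * soft limit that merely queues background push work, while
 * XLOG_CIL_BLOCKING_SPACE_LIMIT() is the hard limit that puts committers
 * to sleep on xc_push_wait until xlog_cil_push_work() switches to a new
 * context and wakes them.
 */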
/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time the caller
 * will undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	cil->xc_push_commit_stable = async;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}
bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}
a44f13ed DC |
1252 | /* |
1253 | * Commit a transaction with the given vector to the Committed Item List. | |
1254 | * | |
1255 | * To do this, we need to format the item, pin it in memory if required and | |
1256 | * account for the space used by the transaction. Once we have done that we | |
1257 | * need to release the unused reservation for the transaction, attach the | |
1258 | * transaction to the checkpoint context so we carry the busy extents through | |
1259 | * to checkpoint completion, and then unlock all the items in the transaction. | |
1260 | * | |
a44f13ed DC |
1261 | * Called with the context lock already held in read mode to lock out |
1262 | * background commit, returns without it held once background commits are | |
1263 | * allowed again. | |
1264 | */ | |
c6f97264 | 1265 | void |
5f9b4b0d DC |
1266 | xlog_cil_commit( |
1267 | struct xlog *log, | |
a44f13ed | 1268 | struct xfs_trans *tp, |
5f9b4b0d | 1269 | xfs_csn_t *commit_seq, |
70393313 | 1270 | bool regrant) |
a44f13ed | 1271 | { |
991aaf65 | 1272 | struct xfs_cil *cil = log->l_cilp; |
195cd83d | 1273 | struct xfs_log_item *lip, *next; |
a44f13ed | 1274 | |
b1c5ebb2 DC |
1275 | /* |
1276 | * Do all necessary memory allocation before we lock the CIL. | |
1277 | * This ensures the allocation does not deadlock with a CIL | |
1278 | * push in memory reclaim (e.g. from kswapd). | |
1279 | */ | |
1280 | xlog_cil_alloc_shadow_bufs(log, tp); | |
1281 | ||
f5baac35 | 1282 | /* lock out background commit */ |
991aaf65 | 1283 | down_read(&cil->xc_ctx_lock); |
f5baac35 | 1284 | |
991aaf65 | 1285 | xlog_cil_insert_items(log, tp); |
a44f13ed | 1286 | |
2039a272 | 1287 | if (regrant && !xlog_is_shutdown(log)) |
8b41e3f9 CH |
1288 | xfs_log_ticket_regrant(log, tp->t_ticket); |
1289 | else | |
1290 | xfs_log_ticket_ungrant(log, tp->t_ticket); | |
ba18781b | 1291 | tp->t_ticket = NULL; |
a44f13ed DC |
1292 | xfs_trans_unreserve_and_mod_sb(tp); |
1293 | ||
1294 | /* | |
1295 | * Once all the items of the transaction have been copied to the CIL, | |
195cd83d | 1296 | * the items can be unlocked and possibly freed. |
a44f13ed DC |
1297 | * |
1298 | * This needs to be done before we drop the CIL context lock because we | |
1299 | * have to update state in the log items and unlock them before they go | |
1300 | * to disk. If we don't, then the CIL checkpoint can race with us and | |
1301 | * we can run checkpoint completion before we've updated and unlocked | |
1302 | * the log items. This affects (at least) processing of stale buffers, | |
1303 | * inodes and EFIs. | |
1304 | */ | |
195cd83d CH |
1305 | trace_xfs_trans_commit_items(tp, _RET_IP_); |
1306 | list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { | |
1307 | xfs_trans_del_item(lip); | |
1308 | if (lip->li_ops->iop_committing) | |
5f9b4b0d | 1309 | lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence); |
195cd83d | 1310 | } |
5f9b4b0d DC |
1311 | if (commit_seq) |
1312 | *commit_seq = cil->xc_ctx->sequence; | |
a44f13ed | 1313 | |
0e7ab7ef DC |
1314 | /* xlog_cil_push_background() releases cil->xc_ctx_lock */ |
1315 | xlog_cil_push_background(log); | |
a44f13ed DC |
1316 | } |
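
/*
 * Illustrative sketch, not part of the original source: the shape of an
 * ->iop_committing callback as invoked from the loop above. Real
 * implementations unlock the object backing the item now that its changes
 * have been copied to the CIL; the empty body here is a hypothetical
 * stand-in.
 */
static void __maybe_unused
xlog_example_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	/*
	 * The item's changes are recorded in checkpoint sequence @seq, so
	 * the backing object can be unlocked and later relogged safely.
	 */
}
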
1317 | ||
0020a190 DC |
1318 | /* |
1319 | * Flush the CIL to stable storage but don't wait for it to complete. This | |
1320 | * requires the CIL push to ensure its commit record hits the disk, but it | |
1321 | * is otherwise no different to a push done from a log force. | |
1322 | */ | |
1323 | void | |
1324 | xlog_cil_flush( | |
1325 | struct xlog *log) | |
1326 | { | |
1327 | xfs_csn_t seq = log->l_cilp->xc_current_sequence; | |
1328 | ||
1329 | trace_xfs_log_force(log->l_mp, seq, _RET_IP_); | |
1330 | xlog_cil_push_now(log, seq, true); | |
1331 | } | |
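
/*
 * Illustrative sketch, not part of the original source: pairing the
 * non-blocking xlog_cil_flush() with a later blocking wait through
 * xlog_cil_force_seq(). The function name is hypothetical.
 */
static void __maybe_unused
xlog_example_flush_then_wait(
	struct xlog		*log)
{
	xfs_csn_t		seq = log->l_cilp->xc_current_sequence;

	/* start the push; this queues work and returns immediately */
	xlog_cil_flush(log);

	/* later: block until that sequence has a commit record LSN */
	xlog_cil_force_seq(log, seq);
}
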
1332 | ||
71e330b5 DC |
1333 | /* |
1334 | * Conditionally push the CIL based on the sequence passed in. | |
1335 | * | |
0020a190 DC |
1336 | * We only need to push if we haven't already pushed the sequence number given. |
1337 | * Hence the only time we will trigger a push here is if the push sequence is | |
1338 | * the same as the current context. | |
71e330b5 DC |
1339 | * |
1340 | * We return the current commit lsn to allow the callers to determine if an | |
1341 | * iclog flush is necessary following this call. | |
71e330b5 DC |
1342 | */ |
1343 | xfs_lsn_t | |
5f9b4b0d | 1344 | xlog_cil_force_seq( |
f7bdf03a | 1345 | struct xlog *log, |
5f9b4b0d | 1346 | xfs_csn_t sequence) |
71e330b5 DC |
1347 | { |
1348 | struct xfs_cil *cil = log->l_cilp; | |
1349 | struct xfs_cil_ctx *ctx; | |
1350 | xfs_lsn_t commit_lsn = NULLCOMMITLSN; | |
1351 | ||
a44f13ed DC |
1352 | ASSERT(sequence <= cil->xc_current_sequence); |
1353 | ||
0020a190 DC |
1354 | if (!sequence) |
1355 | sequence = cil->xc_current_sequence; | |
1356 | trace_xfs_log_force(log->l_mp, sequence, _RET_IP_); | |
1357 | ||
a44f13ed DC |
1358 | /* |
1359 | * check to see if we need to force out the current context. | |
1360 | * xlog_cil_push() handles racing pushes for the same sequence, | |
1361 | * so no need to deal with it here. | |
1362 | */ | |
f876e446 | 1363 | restart: |
0020a190 | 1364 | xlog_cil_push_now(log, sequence, false); |
71e330b5 DC |
1365 | |
1366 | /* | |
1367 | * See if we can find a previous sequence still committing. | |
71e330b5 DC |
1368 | * We need to wait for all previous sequence commits to complete |
1369 | * before allowing the force of push_seq to go ahead. Hence block | |
1370 | * on commits for those as well. | |
1371 | */ | |
4bb928cd | 1372 | spin_lock(&cil->xc_push_lock); |
71e330b5 | 1373 | list_for_each_entry(ctx, &cil->xc_committing, committing) { |
ac983517 DC |
1374 | /* |
1375 | * Avoid getting stuck in this loop because we were woken by the | |
1376 | * shutdown, but then went back to sleep once already in the | |
1377 | * shutdown state. | |
1378 | */ | |
2039a272 | 1379 | if (xlog_is_shutdown(log)) |
ac983517 | 1380 | goto out_shutdown; |
a44f13ed | 1381 | if (ctx->sequence > sequence) |
71e330b5 DC |
1382 | continue; |
1383 | if (!ctx->commit_lsn) { | |
1384 | /* | |
1385 | * It is still being pushed! Wait for the push to | |
1386 | * complete, then start again from the beginning. | |
1387 | */ | |
0020a190 | 1388 | XFS_STATS_INC(log->l_mp, xs_log_force_sleep); |
4bb928cd | 1389 | xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); |
71e330b5 DC |
1390 | goto restart; |
1391 | } | |
a44f13ed | 1392 | if (ctx->sequence != sequence) |
71e330b5 DC |
1393 | continue; |
1394 | /* found it! */ | |
1395 | commit_lsn = ctx->commit_lsn; | |
1396 | } | |
f876e446 DC |
1397 | |
1398 | /* | |
1399 | * The call to xlog_cil_push_now() executes the push in the background. | |
1400 | * Hence by the time we have got here, our sequence may not have been | |
1401 | * pushed yet. This is true if the current sequence still matches the | |
1402 | * push sequence after the above wait loop and the CIL still contains | |
8af3dcd3 DC |
1403 | * dirty objects. This is guaranteed by the push code first adding the |
1404 | * context to the committing list before emptying the CIL. | |
f876e446 | 1405 | * |
8af3dcd3 DC |
1406 | * Hence if we don't find the context in the committing list and the |
1407 | * current sequence number is unchanged then the CIL contents are | |
1408 | * significant. If the CIL is empty, it means there was nothing to push | |
1409 | * and that means there is nothing to wait for. If the CIL is not empty, | |
1410 | * it means we haven't yet started the push, because if it had started | |
1411 | * we would have found the context on the committing list. | |
f876e446 | 1412 | */ |
f876e446 DC |
1413 | if (sequence == cil->xc_current_sequence && |
1414 | !list_empty(&cil->xc_cil)) { | |
1415 | spin_unlock(&cil->xc_push_lock); | |
1416 | goto restart; | |
1417 | } | |
1418 | ||
4bb928cd | 1419 | spin_unlock(&cil->xc_push_lock); |
71e330b5 | 1420 | return commit_lsn; |
ac983517 DC |
1421 | |
1422 | /* | |
1423 | * We detected a shutdown in progress. We need to trigger the log force | |
1424 | * to pass through its iclog state machine error handling, even though | |
1425 | * we are already in a shutdown state. Hence we can't return | |
1426 | * NULLCOMMITLSN here as that has special meaning to log forces (i.e. | |
1427 | * LSN is already stable), so we return a zero LSN instead. | |
1428 | */ | |
1429 | out_shutdown: | |
1430 | spin_unlock(&cil->xc_push_lock); | |
1431 | return 0; | |
71e330b5 | 1432 | } |
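
/*
 * Illustrative sketch, not part of the original source: how a log force
 * caller might consume the return value above. NULLCOMMITLSN means the
 * sequence is already stable and there is nothing to flush; a zero LSN
 * (shutdown) is still passed to the iclog force path so its error
 * handling runs. The helper name is hypothetical.
 */
static int __maybe_unused
xlog_example_force_seq(
	struct xlog		*log,
	xfs_csn_t		seq)
{
	xfs_lsn_t		lsn;

	lsn = xlog_cil_force_seq(log, seq);
	if (lsn == NULLCOMMITLSN)
		return 0;
	return xfs_log_force_lsn(log->l_mp, lsn, XFS_LOG_SYNC, NULL);
}
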
ccf7c23f DC |
1433 | |
1434 | /* | |
1435 | * Check if the current log item was first committed in this sequence. | |
1436 | * We can't rely on just the log item being in the CIL; we have to check | |
1437 | * the recorded commit sequence number. | |
1438 | * | |
1439 | * Note: for this to be used in a non-racy manner, it has to be called with | |
1440 | * CIL flushing locked out. As a result, it should only be used during the | |
1441 | * transaction commit process when deciding what to format into the item. | |
1442 | */ | |
1443 | bool | |
1444 | xfs_log_item_in_current_chkpt( | |
1445 | struct xfs_log_item *lip) | |
1446 | { | |
5f9b4b0d | 1447 | struct xfs_cil_ctx *ctx = lip->li_mountp->m_log->l_cilp->xc_ctx; |
ccf7c23f | 1448 | |
ccf7c23f DC |
1449 | if (list_empty(&lip->li_cil)) |
1450 | return false; | |
1451 | ||
ccf7c23f DC |
1452 | /* |
1453 | * li_seq is written on the first commit of a log item to record the | |
1454 | * first checkpoint it is written to. Hence if it is different to the | |
1455 | * current sequence, we're in a new checkpoint. | |
1456 | */ | |
5f9b4b0d | 1457 | return lip->li_seq == ctx->sequence; |
ccf7c23f | 1458 | } |
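
/*
 * Illustrative sketch, not part of the original source: per the note above,
 * this predicate is only stable from transaction commit context with CIL
 * flushing locked out. A hypothetical format-time decision might look like
 * this.
 */
static bool __maybe_unused
xlog_example_item_needs_relog(
	struct xfs_log_item	*lip)
{
	/*
	 * An item first committed in an earlier checkpoint must be fully
	 * formatted into the current one; an item already in the current
	 * checkpoint need not be.
	 */
	return !xfs_log_item_in_current_chkpt(lip);
}
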
4c2d542f DC |
1459 | |
1460 | /* | |
1461 | * Allocate and initialise the CIL structure and its first checkpoint context. | |
1462 | */ | |
1463 | int | |
1464 | xlog_cil_init( | |
f7bdf03a | 1465 | struct xlog *log) |
4c2d542f DC |
1466 | { |
1467 | struct xfs_cil *cil; | |
1468 | struct xfs_cil_ctx *ctx; | |
1469 | ||
707e0dda | 1470 | cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL); |
4c2d542f | 1471 | if (!cil) |
2451337d | 1472 | return -ENOMEM; |
33c0dd78 DC |
1473 | /* |
1474 | * Limit the CIL pipeline depth to 4 concurrent work items to bound the | |
1475 | * concurrency the log spinlocks will be exposed to. | |
1476 | */ | |
1477 | cil->xc_push_wq = alloc_workqueue("xfs-cil/%s", | |
1478 | XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND), | |
1479 | 4, log->l_mp->m_super->s_id); | |
1480 | if (!cil->xc_push_wq) | |
1481 | goto out_destroy_cil; | |
4c2d542f | 1482 | |
4c2d542f DC |
1483 | INIT_LIST_HEAD(&cil->xc_cil); |
1484 | INIT_LIST_HEAD(&cil->xc_committing); | |
1485 | spin_lock_init(&cil->xc_cil_lock); | |
4bb928cd | 1486 | spin_lock_init(&cil->xc_push_lock); |
c7f87f39 | 1487 | init_waitqueue_head(&cil->xc_push_wait); |
4c2d542f | 1488 | init_rwsem(&cil->xc_ctx_lock); |
68a74dca | 1489 | init_waitqueue_head(&cil->xc_start_wait); |
4c2d542f | 1490 | init_waitqueue_head(&cil->xc_commit_wait); |
4c2d542f DC |
1491 | cil->xc_log = log; |
1492 | log->l_cilp = cil; | |
39823d0f DC |
1493 | |
1494 | ctx = xlog_cil_ctx_alloc(); | |
1495 | xlog_cil_ctx_switch(cil, ctx); | |
1496 | ||
4c2d542f | 1497 | return 0; |
33c0dd78 DC |
1498 | |
1499 | out_destroy_cil: | |
1500 | kmem_free(cil); | |
1501 | return -ENOMEM; | |
4c2d542f DC |
1502 | } |
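
/*
 * Illustrative sketch, not part of the original source: the expected
 * pairing of xlog_cil_init() and xlog_cil_destroy() around the life of a
 * log. Error handling beyond init failure is elided and the function name
 * is hypothetical.
 */
static int __maybe_unused
xlog_example_cil_lifecycle(
	struct xlog		*log)
{
	int			error;

	error = xlog_cil_init(log);	/* allocates the CIL + first context */
	if (error)
		return error;

	/* ... commit transactions, push checkpoints ... */

	xlog_cil_destroy(log);		/* the CIL must be empty by now */
	return 0;
}
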
1503 | ||
1504 | void | |
1505 | xlog_cil_destroy( | |
f7bdf03a | 1506 | struct xlog *log) |
4c2d542f DC |
1507 | { |
1508 | if (log->l_cilp->xc_ctx) { | |
1509 | if (log->l_cilp->xc_ctx->ticket) | |
1510 | xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket); | |
1511 | kmem_free(log->l_cilp->xc_ctx); | |
1512 | } | |
1513 | ||
1514 | ASSERT(list_empty(&log->l_cilp->xc_cil)); | |
33c0dd78 | 1515 | destroy_workqueue(log->l_cilp->xc_push_wq); |
4c2d542f DC |
1516 | kmem_free(log->l_cilp); |
1517 | } | |
1518 |