Commit | Line | Data |
---|---|---|
b3b94faa DT |
1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | |
da6dd40d | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
b3b94faa DT |
4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | |
6 | * modify, copy, or redistribute it subject to the terms and conditions | |
e9fc2aa0 | 7 | * of the GNU General Public License version 2. |
b3b94faa DT |
8 | */ |
9 | ||
10 | #include <linux/sched.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/spinlock.h> | |
13 | #include <linux/completion.h> | |
14 | #include <linux/buffer_head.h> | |
5c676f6d | 15 | #include <linux/gfs2_ondisk.h> |
71b86f56 | 16 | #include <linux/crc32.h> |
c1696fb8 | 17 | #include <linux/crc32c.h> |
a25311c8 | 18 | #include <linux/delay.h> |
ec69b188 SW |
19 | #include <linux/kthread.h> |
20 | #include <linux/freezer.h> | |
254db57f | 21 | #include <linux/bio.h> |
885bceca | 22 | #include <linux/blkdev.h> |
4667a0ec | 23 | #include <linux/writeback.h> |
4a36d08d | 24 | #include <linux/list_sort.h> |
b3b94faa DT |
25 | |
26 | #include "gfs2.h" | |
5c676f6d | 27 | #include "incore.h" |
b3b94faa DT |
28 | #include "bmap.h" |
29 | #include "glock.h" | |
30 | #include "log.h" | |
31 | #include "lops.h" | |
32 | #include "meta_io.h" | |
5c676f6d | 33 | #include "util.h" |
71b86f56 | 34 | #include "dir.h" |
63997775 | 35 | #include "trace_gfs2.h" |
b3b94faa | 36 | |
b3b94faa DT |
37 | /** |
38 | * gfs2_struct2blk - compute stuff | |
39 | * @sdp: the filesystem | |
40 | * @nstruct: the number of structures | |
41 | * @ssize: the size of the structures | |
42 | * | |
43 | * Compute the number of log descriptor blocks needed to hold a certain number | |
44 | * of structures of a certain size. | |
45 | * | |
46 | * Returns: the number of blocks needed (minimum is always 1) | |
47 | */ | |
48 | ||
49 | unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, | |
50 | unsigned int ssize) | |
51 | { | |
52 | unsigned int blks; | |
53 | unsigned int first, second; | |
54 | ||
55 | blks = 1; | |
faa31ce8 | 56 | first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize; |
b3b94faa DT |
57 | |
58 | if (nstruct > first) { | |
568f4c96 SW |
59 | second = (sdp->sd_sb.sb_bsize - |
60 | sizeof(struct gfs2_meta_header)) / ssize; | |
5c676f6d | 61 | blks += DIV_ROUND_UP(nstruct - first, second); |
b3b94faa DT |
62 | } |
63 | ||
64 | return blks; | |
65 | } | |
66 | ||
1e1a3d03 SW |
67 | /** |
68 | * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters | |
69 | * @mapping: The associated mapping (maybe NULL) | |
70 | * @bd: The gfs2_bufdata to remove | |
71 | * | |
c618e87a | 72 | * The ail lock _must_ be held when calling this function |
1e1a3d03 SW |
73 | * |
74 | */ | |
75 | ||
static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	/* Detach bd from its transaction and from both the per-state and
	   per-glock ail lists, then drop the glock's ail count and the
	   buffer_head reference the ail held.  Caller holds sd_ail_lock. */
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}
84 | ||
ddacfaf7 SW |
85 | /** |
86 | * gfs2_ail1_start_one - Start I/O on a part of the AIL | |
87 | * @sdp: the filesystem | |
4667a0ec SW |
88 | * @wbc: The writeback control structure |
89 | * @ai: The ail structure | |
ddacfaf7 SW |
90 | * |
91 | */ | |
92 | ||
4f1de018 SW |
93 | static int gfs2_ail1_start_one(struct gfs2_sbd *sdp, |
94 | struct writeback_control *wbc, | |
16ca9412 | 95 | struct gfs2_trans *tr) |
d6a079e8 DC |
96 | __releases(&sdp->sd_ail_lock) |
97 | __acquires(&sdp->sd_ail_lock) | |
ddacfaf7 | 98 | { |
5ac048bb | 99 | struct gfs2_glock *gl = NULL; |
4667a0ec | 100 | struct address_space *mapping; |
ddacfaf7 SW |
101 | struct gfs2_bufdata *bd, *s; |
102 | struct buffer_head *bh; | |
ddacfaf7 | 103 | |
16ca9412 | 104 | list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) { |
4667a0ec | 105 | bh = bd->bd_bh; |
ddacfaf7 | 106 | |
16ca9412 | 107 | gfs2_assert(sdp, bd->bd_tr == tr); |
ddacfaf7 | 108 | |
4667a0ec SW |
109 | if (!buffer_busy(bh)) { |
110 | if (!buffer_uptodate(bh)) | |
111 | gfs2_io_error_bh(sdp, bh); | |
16ca9412 | 112 | list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list); |
4667a0ec SW |
113 | continue; |
114 | } | |
115 | ||
116 | if (!buffer_dirty(bh)) | |
117 | continue; | |
118 | if (gl == bd->bd_gl) | |
119 | continue; | |
120 | gl = bd->bd_gl; | |
16ca9412 | 121 | list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list); |
4667a0ec | 122 | mapping = bh->b_page->mapping; |
4f1de018 SW |
123 | if (!mapping) |
124 | continue; | |
4667a0ec SW |
125 | spin_unlock(&sdp->sd_ail_lock); |
126 | generic_writepages(mapping, wbc); | |
127 | spin_lock(&sdp->sd_ail_lock); | |
128 | if (wbc->nr_to_write <= 0) | |
129 | break; | |
4f1de018 | 130 | return 1; |
4667a0ec | 131 | } |
4f1de018 SW |
132 | |
133 | return 0; | |
4667a0ec | 134 | } |
ddacfaf7 | 135 | |
ddacfaf7 | 136 | |
4667a0ec SW |
137 | /** |
138 | * gfs2_ail1_flush - start writeback of some ail1 entries | |
139 | * @sdp: The super block | |
140 | * @wbc: The writeback control structure | |
141 | * | |
142 | * Writes back some ail1 entries, according to the limits in the | |
143 | * writeback control structure | |
144 | */ | |
ddacfaf7 | 145 | |
4667a0ec SW |
146 | void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc) |
147 | { | |
148 | struct list_head *head = &sdp->sd_ail1_list; | |
16ca9412 | 149 | struct gfs2_trans *tr; |
885bceca | 150 | struct blk_plug plug; |
ddacfaf7 | 151 | |
c83ae9ca | 152 | trace_gfs2_ail_flush(sdp, wbc, 1); |
885bceca | 153 | blk_start_plug(&plug); |
4667a0ec | 154 | spin_lock(&sdp->sd_ail_lock); |
4f1de018 | 155 | restart: |
16ca9412 | 156 | list_for_each_entry_reverse(tr, head, tr_list) { |
4667a0ec | 157 | if (wbc->nr_to_write <= 0) |
ddacfaf7 | 158 | break; |
16ca9412 | 159 | if (gfs2_ail1_start_one(sdp, wbc, tr)) |
4f1de018 | 160 | goto restart; |
4667a0ec SW |
161 | } |
162 | spin_unlock(&sdp->sd_ail_lock); | |
885bceca | 163 | blk_finish_plug(&plug); |
c83ae9ca | 164 | trace_gfs2_ail_flush(sdp, wbc, 0); |
4667a0ec SW |
165 | } |
166 | ||
167 | /** | |
168 | * gfs2_ail1_start - start writeback of all ail1 entries | |
169 | * @sdp: The superblock | |
170 | */ | |
171 | ||
172 | static void gfs2_ail1_start(struct gfs2_sbd *sdp) | |
173 | { | |
174 | struct writeback_control wbc = { | |
175 | .sync_mode = WB_SYNC_NONE, | |
176 | .nr_to_write = LONG_MAX, | |
177 | .range_start = 0, | |
178 | .range_end = LLONG_MAX, | |
179 | }; | |
180 | ||
181 | return gfs2_ail1_flush(sdp, &wbc); | |
ddacfaf7 SW |
182 | } |
183 | ||
184 | /** | |
185 | * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced | |
186 | * @sdp: the filesystem | |
187 | * @ai: the AIL entry | |
188 | * | |
189 | */ | |
190 | ||
16ca9412 | 191 | static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr) |
ddacfaf7 SW |
192 | { |
193 | struct gfs2_bufdata *bd, *s; | |
194 | struct buffer_head *bh; | |
195 | ||
16ca9412 | 196 | list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, |
ddacfaf7 SW |
197 | bd_ail_st_list) { |
198 | bh = bd->bd_bh; | |
16ca9412 | 199 | gfs2_assert(sdp, bd->bd_tr == tr); |
4667a0ec SW |
200 | if (buffer_busy(bh)) |
201 | continue; | |
ddacfaf7 SW |
202 | if (!buffer_uptodate(bh)) |
203 | gfs2_io_error_bh(sdp, bh); | |
16ca9412 | 204 | list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list); |
ddacfaf7 SW |
205 | } |
206 | ||
ddacfaf7 SW |
207 | } |
208 | ||
4667a0ec SW |
209 | /** |
210 | * gfs2_ail1_empty - Try to empty the ail1 lists | |
211 | * @sdp: The superblock | |
212 | * | |
213 | * Tries to empty the ail1 lists, starting with the oldest first | |
214 | */ | |
b3b94faa | 215 | |
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	/* Walk oldest-first.  A transaction may only retire to the ail2
	   list while every older transaction has already retired, so the
	   journal tail never moves past unwritten data. */
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr);
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	/* Returns nonzero when the ail1 list has been fully drained. */
	return ret;
}
235 | ||
26b06a69 SW |
236 | static void gfs2_ail1_wait(struct gfs2_sbd *sdp) |
237 | { | |
16ca9412 | 238 | struct gfs2_trans *tr; |
26b06a69 SW |
239 | struct gfs2_bufdata *bd; |
240 | struct buffer_head *bh; | |
241 | ||
242 | spin_lock(&sdp->sd_ail_lock); | |
16ca9412 BM |
243 | list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) { |
244 | list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) { | |
26b06a69 SW |
245 | bh = bd->bd_bh; |
246 | if (!buffer_locked(bh)) | |
247 | continue; | |
248 | get_bh(bh); | |
249 | spin_unlock(&sdp->sd_ail_lock); | |
250 | wait_on_buffer(bh); | |
251 | brelse(bh); | |
252 | return; | |
253 | } | |
254 | } | |
255 | spin_unlock(&sdp->sd_ail_lock); | |
256 | } | |
ddacfaf7 SW |
257 | |
258 | /** | |
259 | * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced | |
260 | * @sdp: the filesystem | |
261 | * @ai: the AIL entry | |
262 | * | |
263 | */ | |
264 | ||
16ca9412 | 265 | static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr) |
ddacfaf7 | 266 | { |
16ca9412 | 267 | struct list_head *head = &tr->tr_ail2_list; |
ddacfaf7 SW |
268 | struct gfs2_bufdata *bd; |
269 | ||
270 | while (!list_empty(head)) { | |
271 | bd = list_entry(head->prev, struct gfs2_bufdata, | |
272 | bd_ail_st_list); | |
16ca9412 | 273 | gfs2_assert(sdp, bd->bd_tr == tr); |
f91a0d3e | 274 | gfs2_remove_from_ail(bd); |
ddacfaf7 SW |
275 | } |
276 | } | |
277 | ||
b3b94faa DT |
278 | static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail) |
279 | { | |
16ca9412 | 280 | struct gfs2_trans *tr, *safe; |
b3b94faa DT |
281 | unsigned int old_tail = sdp->sd_log_tail; |
282 | int wrap = (new_tail < old_tail); | |
283 | int a, b, rm; | |
284 | ||
d6a079e8 | 285 | spin_lock(&sdp->sd_ail_lock); |
b3b94faa | 286 | |
16ca9412 BM |
287 | list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) { |
288 | a = (old_tail <= tr->tr_first); | |
289 | b = (tr->tr_first < new_tail); | |
b3b94faa DT |
290 | rm = (wrap) ? (a || b) : (a && b); |
291 | if (!rm) | |
292 | continue; | |
293 | ||
16ca9412 BM |
294 | gfs2_ail2_empty_one(sdp, tr); |
295 | list_del(&tr->tr_list); | |
296 | gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list)); | |
297 | gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list)); | |
298 | kfree(tr); | |
b3b94faa DT |
299 | } |
300 | ||
d6a079e8 | 301 | spin_unlock(&sdp->sd_ail_lock); |
b3b94faa DT |
302 | } |
303 | ||
24972557 BM |
304 | /** |
305 | * gfs2_log_release - Release a given number of log blocks | |
306 | * @sdp: The GFS2 superblock | |
307 | * @blks: The number of blocks | |
308 | * | |
309 | */ | |
310 | ||
311 | void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks) | |
312 | { | |
313 | ||
314 | atomic_add(blks, &sdp->sd_log_blks_free); | |
315 | trace_gfs2_log_blocks(sdp, blks); | |
316 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= | |
317 | sdp->sd_jdesc->jd_blocks); | |
318 | up_read(&sdp->sd_log_flush_lock); | |
319 | } | |
320 | ||
b3b94faa DT |
321 | /** |
322 | * gfs2_log_reserve - Make a log reservation | |
323 | * @sdp: The GFS2 superblock | |
324 | * @blks: The number of blocks to reserve | |
325 | * | |
89918647 | 326 | * Note that we never give out the last few blocks of the journal. Thats |
2332c443 | 327 | * due to the fact that there is a small number of header blocks |
b004157a SW |
328 | * associated with each log flush. The exact number can't be known until |
329 | * flush time, so we ensure that we have just enough free blocks at all | |
330 | * times to avoid running out during a log flush. | |
331 | * | |
5e687eac BM |
332 | * We no longer flush the log here, instead we wake up logd to do that |
333 | * for us. To avoid the thundering herd and to ensure that we deal fairly | |
334 | * with queued waiters, we use an exclusive wait. This means that when we | |
335 | * get woken with enough journal space to get our reservation, we need to | |
336 | * wake the next waiter on the list. | |
337 | * | |
b3b94faa DT |
338 | * Returns: errno |
339 | */ | |
340 | ||
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	/* Blocks held back so a future log flush can always write its
	   header blocks without running the journal dry. */
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		/* Not enough space: wake logd to flush the log and sleep
		   until there is.  The exclusive wait means only one
		   waiter is woken per wakeup (no thundering herd). */
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	/* Claim the blocks atomically; if another reserver raced us and
	   changed the count, drop our reserving ref and start over. */
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	/* A journal that has been shut down cannot honour the
	   reservation; hand the blocks straight back. */
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
394 | ||
b3b94faa DT |
395 | /** |
396 | * log_distance - Compute distance between two journal blocks | |
397 | * @sdp: The GFS2 superblock | |
398 | * @newer: The most recent journal block of the pair | |
399 | * @older: The older journal block of the pair | |
400 | * | |
401 | * Compute the distance (in the journal direction) between two | |
402 | * blocks in the journal | |
403 | * | |
404 | * Returns: the distance in blocks | |
405 | */ | |
406 | ||
faa31ce8 | 407 | static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer, |
b3b94faa DT |
408 | unsigned int older) |
409 | { | |
410 | int dist; | |
411 | ||
412 | dist = newer - older; | |
413 | if (dist < 0) | |
414 | dist += sdp->sd_jdesc->jd_blocks; | |
415 | ||
416 | return dist; | |
417 | } | |
418 | ||
2332c443 RP |
419 | /** |
420 | * calc_reserved - Calculate the number of blocks to reserve when | |
421 | * refunding a transaction's unused buffers. | |
422 | * @sdp: The GFS2 superblock | |
423 | * | |
424 | * This is complex. We need to reserve room for all our currently used | |
425 | * metadata buffers (e.g. normal file I/O rewriting file time stamps) and | |
426 | * all our journaled data buffers for journaled files (e.g. files in the | |
427 | * meta_fs like rindex, or files for which chattr +j was done.) | |
428 | * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush | |
429 | * will count it as free space (sd_log_blks_free) and corruption will follow. | |
430 | * | |
431 | * We can have metadata bufs and jdata bufs in the same journal. So each | |
432 | * type gets its own log header, for which we need to reserve a block. | |
433 | * In fact, each type has the potential for needing more than one header | |
434 | * in cases where we have more buffers than will fit on a journal page. | |
435 | * Metadata journal entries take up half the space of journaled buffer entries. | |
436 | * Thus, metadata entries have buf_limit (502) and journaled buffers have | |
437 | * databuf_limit (251) before they cause a wrap around. | |
438 | * | |
439 | * Also, we need to reserve blocks for revoke journal entries and one for an | |
440 | * overall header for the lot. | |
441 | * | |
442 | * Returns: the number of blocks reserved | |
443 | */ | |
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;	/* metadata buffers still in the transaction */
	unsigned int dbuf;	/* journaled data buffers still in the transaction */
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		/* Net buffer counts: new minus removed. */
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	/* Revoke entries need their own descriptor block(s). */
	if (sdp->sd_log_commited_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
468 | ||
b3b94faa DT |
469 | static unsigned int current_tail(struct gfs2_sbd *sdp) |
470 | { | |
16ca9412 | 471 | struct gfs2_trans *tr; |
b3b94faa DT |
472 | unsigned int tail; |
473 | ||
d6a079e8 | 474 | spin_lock(&sdp->sd_ail_lock); |
b3b94faa | 475 | |
faa31ce8 | 476 | if (list_empty(&sdp->sd_ail1_list)) { |
b3b94faa | 477 | tail = sdp->sd_log_head; |
faa31ce8 | 478 | } else { |
16ca9412 BM |
479 | tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans, |
480 | tr_list); | |
481 | tail = tr->tr_first; | |
b3b94faa DT |
482 | } |
483 | ||
d6a079e8 | 484 | spin_unlock(&sdp->sd_ail_lock); |
b3b94faa DT |
485 | |
486 | return tail; | |
487 | } | |
488 | ||
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	/* Advance the journal tail to new_tail, freeing the ail2
	   transactions that lie in the reclaimed span and crediting the
	   reclaimed distance back to the free-block count. */
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}
502 | ||
b3b94faa | 503 | |
/* Sleep until all in-flight log I/O has completed (sd_log_in_flight
   drops to zero); the completion path wakes sd_log_flush_wait. */
static void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			/* Re-check after queuing to avoid a lost wakeup. */
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}
518 | ||
45138990 | 519 | static int ip_cmp(void *priv, struct list_head *a, struct list_head *b) |
4a36d08d | 520 | { |
45138990 | 521 | struct gfs2_inode *ipa, *ipb; |
4a36d08d | 522 | |
45138990 SW |
523 | ipa = list_entry(a, struct gfs2_inode, i_ordered); |
524 | ipb = list_entry(b, struct gfs2_inode, i_ordered); | |
4a36d08d | 525 | |
45138990 | 526 | if (ipa->i_no_addr < ipb->i_no_addr) |
4a36d08d | 527 | return -1; |
45138990 | 528 | if (ipa->i_no_addr > ipb->i_no_addr) |
4a36d08d BP |
529 | return 1; |
530 | return 0; | |
531 | } | |
532 | ||
d7b616e2 SW |
533 | static void gfs2_ordered_write(struct gfs2_sbd *sdp) |
534 | { | |
45138990 | 535 | struct gfs2_inode *ip; |
d7b616e2 SW |
536 | LIST_HEAD(written); |
537 | ||
45138990 SW |
538 | spin_lock(&sdp->sd_ordered_lock); |
539 | list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp); | |
d7b616e2 | 540 | while (!list_empty(&sdp->sd_log_le_ordered)) { |
45138990 | 541 | ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered); |
1f23bc78 AD |
542 | if (ip->i_inode.i_mapping->nrpages == 0) { |
543 | test_and_clear_bit(GIF_ORDERED, &ip->i_flags); | |
544 | list_del(&ip->i_ordered); | |
d7b616e2 | 545 | continue; |
1f23bc78 AD |
546 | } |
547 | list_move(&ip->i_ordered, &written); | |
45138990 SW |
548 | spin_unlock(&sdp->sd_ordered_lock); |
549 | filemap_fdatawrite(ip->i_inode.i_mapping); | |
550 | spin_lock(&sdp->sd_ordered_lock); | |
d7b616e2 SW |
551 | } |
552 | list_splice(&written, &sdp->sd_log_le_ordered); | |
45138990 | 553 | spin_unlock(&sdp->sd_ordered_lock); |
d7b616e2 SW |
554 | } |
555 | ||
/* Wait for previously-started ordered writeback to finish, emptying the
   ordered list as each inode completes. */
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* Drop the spinlock around the sleeping wait. */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
573 | ||
/* Remove an inode from the ordered-write list, if it is on it. */
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	/* The GIF_ORDERED bit tells us whether the inode is linked. */
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
583 | ||
5d054964 BM |
/* Turn a bufdata into a revoke entry: detach it from its buffer_head
   and ail lists, then queue it on the log's revoke list. */
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	/* Remember the block number; bd_bh is about to go away. */
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	bd->bd_ops = &gfs2_revoke_lops;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	/* Keep the glock marked until the log is flushed. */
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}
599 | ||
/* Scan the ail lists for already-written buffers that can be revoked
   instead of being replayed, and queue as many revokes as will fit in
   the space available in the current log flush. */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	/* Entries that fit in one log descriptor block. */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/* First pass: is there any candidate at all?  (bd_list empty
	   means the buffer is not already queued as a revoke.) */
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	/* Grow the budget by one continuation block at a time until it
	   covers the revokes already queued, then keep the surplus. */
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/* Second pass: queue revokes until the budget runs out. */
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	/* Nothing got queued after all: give the blocks back. */
	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
653 | ||
34cc1781 | 654 | /** |
588bff95 | 655 | * write_log_header - Write a journal log header buffer at sd_log_flush_head |
34cc1781 | 656 | * @sdp: The GFS2 superblock |
c1696fb8 | 657 | * @jd: journal descriptor of the journal to which we are writing |
588bff95 BP |
658 | * @seq: sequence number |
659 | * @tail: tail of the log | |
c1696fb8 | 660 | * @flags: log header flags GFS2_LOG_HEAD_* |
588bff95 | 661 | * @op_flags: flags to pass to the bio |
34cc1781 SW |
662 | * |
663 | * Returns: the initialized log buffer descriptor | |
664 | */ | |
665 | ||
c1696fb8 BP |
void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 flags, int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 addr;

	lh = page_address(page);
	clear_page(lh);

	/* Fill in the fixed on-disk header fields (big-endian). */
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	/* Legacy v1 hash covers only the first LH_V1_SIZE bytes. */
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	tv = current_kernel_time64();
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	addr = gfs2_log_bmap(sdp);
	lh->lh_addr = cpu_to_be64(addr);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	/* crc32c covers everything after the lh_crc field itself, out to
	   the end of the block. */
	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
	log_flush_wait(sdp);
}
725 | ||
726 | /** | |
727 | * log_write_header - Get and initialize a journal header buffer | |
728 | * @sdp: The GFS2 superblock | |
c1696fb8 | 729 | * @flags: The log header flags, including log header origin |
588bff95 BP |
730 | * |
731 | * Returns: the initialized log buffer descriptor | |
732 | */ | |
733 | ||
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	/* Default: ordered write with a preceding cache flush and FUA. */
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		/* No barrier support: wait for ordered data and pending
		   log I/O by hand, then drop PREFLUSH/FUA. */
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      flags, op_flags);

	/* The header commits up to 'tail'; reclaim the freed span. */
	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
755 | ||
b3b94faa | 756 | /** |
b09e593d | 757 | * gfs2_log_flush - flush incore transaction(s) |
b3b94faa DT |
758 | * @sdp: the filesystem |
759 | * @gl: The glock structure to flush. If NULL, flush the whole incore log | |
805c0907 | 760 | * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags |
b3b94faa DT |
761 | * |
762 | */ | |
763 | ||
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	/* Detach the active transaction; its ail lists are reinitialized
	   here and populated as the buffers are committed below. */
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely (state == SFS_FROZEN))
			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
	}

	/* A frozen fs must not have anything left to write. */
	if (unlikely(state == SFS_FROZEN))
		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	/* Write ordered data, then the log blocks themselves. */
	gfs2_ordered_write(sdp);
	lops_before_commit(sdp, tr);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		/* Something was written: commit it with a log header. */
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
		/* Nothing new, but the tail moved: write a header just to
		   advance the tail and reclaim journal space. */
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	/* Hand the transaction to the ail if it still tracks buffers;
	   otherwise tr stays non-NULL and is freed at the bottom. */
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		/* Shutdown/freeze flush: push everything out of the ail
		   and write a final header before going quiet. */
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp))
					break;
			}
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	/* No-op if the transaction was handed to the ail above. */
	kfree(tr);
}
849 | ||
d69a3c65 SW |
850 | /** |
851 | * gfs2_merge_trans - Merge a new transaction into a cached transaction | |
852 | * @old: Original transaction to be expanded | |
853 | * @new: New transaction to be merged | |
854 | */ | |
855 | ||
856 | static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) | |
857 | { | |
9862ca05 | 858 | WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags)); |
d69a3c65 SW |
859 | |
860 | old->tr_num_buf_new += new->tr_num_buf_new; | |
861 | old->tr_num_databuf_new += new->tr_num_databuf_new; | |
862 | old->tr_num_buf_rm += new->tr_num_buf_rm; | |
863 | old->tr_num_databuf_rm += new->tr_num_databuf_rm; | |
864 | old->tr_num_revoke += new->tr_num_revoke; | |
865 | old->tr_num_revoke_rm += new->tr_num_revoke_rm; | |
866 | ||
867 | list_splice_tail_init(&new->tr_databuf, &old->tr_databuf); | |
868 | list_splice_tail_init(&new->tr_buf, &old->tr_buf); | |
869 | } | |
870 | ||
b3b94faa DT |
871 | static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) |
872 | { | |
2332c443 | 873 | unsigned int reserved; |
ac39aadd | 874 | unsigned int unused; |
022ef4fe | 875 | unsigned int maxres; |
b3b94faa DT |
876 | |
877 | gfs2_log_lock(sdp); | |
878 | ||
022ef4fe SW |
879 | if (sdp->sd_log_tr) { |
880 | gfs2_merge_trans(sdp->sd_log_tr, tr); | |
881 | } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { | |
9862ca05 | 882 | gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); |
022ef4fe | 883 | sdp->sd_log_tr = tr; |
9862ca05 | 884 | set_bit(TR_ATTACHED, &tr->tr_flags); |
022ef4fe SW |
885 | } |
886 | ||
b3b94faa | 887 | sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm; |
2332c443 | 888 | reserved = calc_reserved(sdp); |
022ef4fe SW |
889 | maxres = sdp->sd_log_blks_reserved + tr->tr_reserved; |
890 | gfs2_assert_withdraw(sdp, maxres >= reserved); | |
891 | unused = maxres - reserved; | |
ac39aadd | 892 | atomic_add(unused, &sdp->sd_log_blks_free); |
63997775 | 893 | trace_gfs2_log_blocks(sdp, unused); |
fd041f0b | 894 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= |
2332c443 | 895 | sdp->sd_jdesc->jd_blocks); |
b3b94faa DT |
896 | sdp->sd_log_blks_reserved = reserved; |
897 | ||
898 | gfs2_log_unlock(sdp); | |
899 | } | |
900 | ||
901 | /** | |
902 | * gfs2_log_commit - Commit a transaction to the log | |
903 | * @sdp: the filesystem | |
904 | * @tr: the transaction | |
905 | * | |
5e687eac BM |
906 | * We wake up gfs2_logd if the number of pinned blocks exceed thresh1 |
907 | * or the total number of used blocks (pinned blocks plus AIL blocks) | |
908 | * is greater than thresh2. | |
909 | * | |
910 | * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of | |
911 | * journal size. | |
912 | * | |
b3b94faa DT |
913 | * Returns: errno |
914 | */ | |
915 | ||
916 | void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |
917 | { | |
918 | log_refund(sdp, tr); | |
b3b94faa | 919 | |
5e687eac BM |
920 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || |
921 | ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) > | |
922 | atomic_read(&sdp->sd_log_thresh2))) | |
923 | wake_up(&sdp->sd_logd_waitq); | |
b3b94faa DT |
924 | } |
925 | ||
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 * Caller must have flushed the log first: no reserved blocks, no
 * outstanding revokes and an empty AIL1 list are asserted below.
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	/* Mark the journal clean on disk */
	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	/* Head and tail now coincide: the log is empty */
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}
948 | ||
5e687eac BM |
949 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) |
950 | { | |
f07b3520 BP |
951 | return (atomic_read(&sdp->sd_log_pinned) + |
952 | atomic_read(&sdp->sd_log_blks_needed) >= | |
953 | atomic_read(&sdp->sd_log_thresh1)); | |
5e687eac BM |
954 | } |
955 | ||
956 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) | |
957 | { | |
958 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); | |
b066a4ee AD |
959 | |
960 | if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags)) | |
961 | return 1; | |
962 | ||
f07b3520 BP |
963 | return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >= |
964 | atomic_read(&sdp->sd_log_thresh2); | |
5e687eac | 965 | } |
ec69b188 SW |
966 | |
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm_withdraw(sdp,
					 "GFS2: fsid=%s: error %d: "
					 "withdrawing the file system to "
					 "prevent further damage.\n",
					 sdp->sd_fsname, sdp->sd_log_error);
		}

		did_flush = false;
		/* t == 0 means the previous wait timed out: flush anyway */
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			/* Start AIL writeback, wait for it, retire what
			   completed, then flush the log */
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		/* Wake anyone waiting for log space if we made progress */
		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		/* Sleep until woken, the timeout expires, or more work
		   arrives; re-check conditions after each wakeup */
		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}
1032 |