/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

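/*
 * Read all the journal entries in one journal bucket into @list (kept
 * sorted by sequence number). Returns 0 if the bucket held no new entries,
 * 1 if at least one entry was added, or -ENOMEM on allocation failure.
 */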
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= READ;
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io		= journal_read_endio;
		bio->bi_private		= &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

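			/*
			 * The entry extends past the end of the buffer we
			 * just read: re-read it, starting from the current
			 * offset.
			 */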
			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)						\
	({							\
		int ret = journal_read_bucket(ca, list, b);	\
		__set_bit(b, bitmap);				\
		if (ret < 0)					\
			return ret;				\
		ret;						\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
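		/* (2654435769 is floor(2^32 / golden ratio): Knuth's
		 * multiplicative hash constant.) */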
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		if (list_empty(list))
			continue;
bsearch:
		/* Binary search */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

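/*
 * Mark the keys in each journal entry we're about to replay, as initial GC
 * would: bump the pin count on the buckets they point to so they can't be
 * reused before replay finishes, and allocate a journal pin for each entry.
 */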
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++)
				if (ptr_available(c, k, j))
					atomic_inc(&PTR_BUCKET(c, k, j)->pin);

			bch_initial_mark_key(c, 0, k);
		}
	}
}

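/*
 * Reinsert the journalled keys into the btree, in the exact order they were
 * journalled, dropping each entry's journal pin once its keys are inserted.
 */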
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate, and is locked if non-NULL:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}

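/* Sequence number of the oldest journal entry still tracked by the pin fifo */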
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

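/*
 * Discard the journal bucket at ja->discard_idx, driving the
 * DISCARD_READY -> DISCARD_IN_FLIGHT -> DISCARD_DONE state machine; each
 * completed discard advances discard_idx towards last_idx.
 */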
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

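/*
 * Free up journal buckets whose entries have all been written back to the
 * btree (seq < last_seq, no outstanding pins), kick off discards for them,
 * and allocate the next bucket on each cache device if we're out of space.
 * Called with c->journal.lock held.
 */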
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

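/*
 * Open the next journal entry: swap to the other in-memory write buffer,
 * push a fresh pin (count 1) for the new entry, and reset the buffer's
 * state.
 */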
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

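/*
 * Fill in the current jset's header and submit the write. Called with
 * c->journal.lock held, which is dropped in every path before the IO
 * completes; if the journal is full, reclaims space and retries from
 * journal_write().
 */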
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

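/*
 * Wait until the current journal entry has room for nkeys more keys,
 * reclaiming journal space or flushing btree nodes as needed; returns with
 * c->journal.lock held.
 */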
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

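/*
 * Delayed-work callback: flush the current journal entry if it still has
 * unwritten keys once journal_delay_ms has elapsed.
 */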
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

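/*
 * Journal an empty keylist: forces a journal entry to be written, which
 * persists the metadata carried in the jset header (current btree root,
 * uuid and prio bucket pointers).
 */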
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}