/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

struct moving_io {
        struct keybuf_key       *w;
        struct search           s;
        struct bbio             bio;
};

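/*
 * moving_pred() is the keybuf scan predicate: an extent is a candidate for
 * copying if any of its pointers lands in a bucket whose live data
 * (GC_SECTORS_USED) is below that cache's gc_move_threshold, which
 * bch_moving_gc() computes below.
 */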
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
        struct cache_set *c = container_of(buf, struct cache_set,
                                           moving_gc_keys);
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++) {
                struct cache *ca = PTR_CACHE(c, k, i);
                struct bucket *g = PTR_BUCKET(c, k, i);

                if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
                        return true;
        }

        return false;
}

/* Moving GC - IO loop */

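/*
 * Each moving_io runs as a small closure pipeline: read_moving() allocates the
 * io and kicks off the read (read_moving_submit), write_moving() reinserts the
 * data through bch_data_insert(), and write_moving_finish() frees the pages
 * and drops the in-flight slot. Each stage is chained with continue_at() on
 * system_wq.
 */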
static void moving_io_destructor(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, s.cl);
        kfree(io);
}

static void write_moving_finish(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, s.cl);
        struct bio *bio = &io->bio.bio;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, bio, i)
                __free_page(bv->bv_page);

        if (io->s.op.insert_collision)
                trace_bcache_gc_copy_collision(&io->w->key);

        bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);

        up(&io->s.op.c->moving_in_flight);

        closure_return_with_destructor(cl, moving_io_destructor);
}

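/*
 * Read completion: record any error on the search so write_moving() can skip
 * the reinsert, then let bch_bbio_endio() handle IO error accounting and drop
 * the closure reference taken for the read.
 */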
static void read_moving_endio(struct bio *bio, int error)
{
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, s.cl);

        if (error)
                io->s.error = error;

        bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
}

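/*
 * Set up the bbio to cover exactly the data described by the key being moved:
 * idle IO priority, size and vec count derived from KEY_SIZE(), pages mapped
 * onto the inline bio_vecs by bch_bio_map(). Used for both the read and the
 * subsequent write.
 */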
static void moving_init(struct moving_io *io)
{
        struct bio *bio = &io->bio.bio;

        bio_init(bio);
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_size = KEY_SIZE(&io->w->key) << 9;
        bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                        PAGE_SECTORS);
        bio->bi_private = &io->s.cl;
        bio->bi_io_vec = bio->bi_inline_vecs;
        bch_bio_map(bio, NULL);
}

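/*
 * The read has completed; turn the buffered data into a cache insert. The
 * insert uses BTREE_REPLACE with the original key so it only succeeds if that
 * key is still present, i.e. the copy is dropped rather than clobbering data
 * that was rewritten while the move was in flight (counted as a
 * gc_copy_collision above).
 */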
static void write_moving(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct moving_io *io = container_of(s, struct moving_io, s);

        if (!s->error) {
                moving_init(io);

                io->bio.bio.bi_sector = KEY_START(&io->w->key);
                s->op.lock = -1;
                s->op.write_prio = 1;
                s->op.cache_bio = &io->bio.bio;

                s->writeback = KEY_DIRTY(&io->w->key);
                s->op.csum = KEY_CSUM(&io->w->key);

                s->op.type = BTREE_REPLACE;
                bkey_copy(&s->op.replace, &io->w->key);

                closure_init(&s->op.cl, cl);
                bch_data_insert(&s->op.cl);
        }

        continue_at(cl, write_moving_finish, system_wq);
}

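/*
 * Submit the read for the key being copied: bch_submit_bbio() points the bio
 * at the cache device and offset named by the key's pointer. write_moving()
 * runs once the read completes and the closure's last reference is dropped.
 */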
static void read_moving_submit(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct moving_io *io = container_of(s, struct moving_io, s);
        struct bio *bio = &io->bio.bio;

        bch_submit_bbio(bio, s->op.c, &io->w->key, 0);

        continue_at(cl, write_moving, system_wq);
}

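/*
 * Main copy loop: rescan moving_gc_keys for extents matching moving_pred(),
 * allocate a moving_io (with inline bio_vecs sized for the key) per extent,
 * and kick off the read. The moving_in_flight semaphore bounds how many
 * copies are outstanding at once; the stack closure is synced before
 * returning so all IO has finished when read_moving() exits.
 */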
static void read_moving(struct cache_set *c)
{
        struct keybuf_key *w;
        struct moving_io *io;
        struct bio *bio;
        struct closure cl;

        closure_init_stack(&cl);

        /* XXX: if we error, background writeback could stall indefinitely */

        while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
                w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
                                           &MAX_KEY, moving_pred);
                if (!w)
                        break;

                io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private = io;
                io->w = w;
                io->s.op.inode = KEY_INODE(&w->key);
                io->s.op.c = c;

                moving_init(io);
                bio = &io->bio.bio;
                bio->bi_rw = READ;
                bio->bi_end_io = read_moving_endio;

                if (bio_alloc_pages(bio, GFP_KERNEL))
                        goto err;

                trace_bcache_gc_copy(&w->key);

                down(&c->moving_in_flight);
                closure_call(&io->s.cl, read_moving_submit, NULL, &cl);
        }

        if (0) {
err:            if (!IS_ERR_OR_NULL(w->private))
                        kfree(w->private);
                bch_keybuf_del(&c->moving_gc_keys, w);
        }

        closure_sync(&cl);
}

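/*
 * Bucket selection. bucket_cmp() orders buckets by live sectors, so the heap
 * built in bch_moving_gc() retains the buckets with the least live data;
 * heap_peek()/bucket_heap_top() give the fullest of those candidates.
 */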
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
        return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
        return GC_SECTORS_USED(heap_peek(&ca->heap));
}

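/*
 * For each cache device, pick the set of least-used buckets whose live data
 * fits within a reserve of free buckets (bounded by half the free fifo), and
 * set gc_move_threshold so moving_pred() selects keys pointing into them.
 * Then rescan the extents from the start and copy them via read_moving().
 */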
void bch_moving_gc(struct cache_set *c)
{
        struct cache *ca;
        struct bucket *b;
        unsigned i;

        if (!c->copy_gc_enabled)
                return;

        mutex_lock(&c->bucket_lock);

        for_each_cache(ca, c, i) {
                unsigned sectors_to_move = 0;
                unsigned reserve_sectors = ca->sb.bucket_size *
                        min(fifo_used(&ca->free), ca->free.size / 2);

                ca->heap.used = 0;

                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                continue;

                        if (!heap_full(&ca->heap)) {
                                sectors_to_move += GC_SECTORS_USED(b);
                                heap_add(&ca->heap, b, bucket_cmp);
                        } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
                                sectors_to_move -= bucket_heap_top(ca);
                                sectors_to_move += GC_SECTORS_USED(b);

                                ca->heap.data[0] = b;
                                heap_sift(&ca->heap, 0, bucket_cmp);
                        }
                }

                while (sectors_to_move > reserve_sectors) {
                        heap_pop(&ca->heap, b, bucket_cmp);
                        sectors_to_move -= GC_SECTORS_USED(b);
                }

                ca->gc_move_threshold = bucket_heap_top(ca);

                pr_debug("threshold %u", ca->gc_move_threshold);
        }

        mutex_unlock(&c->bucket_lock);

        c->moving_gc_keys.last_scanned = ZERO_KEY;

        read_moving(c);
}

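/*
 * Called when the cache set is set up: initialize the keybuf used to track
 * extents being moved and cap concurrent moving-GC copies at 64 in flight.
 */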
void bch_moving_init_cache_set(struct cache_set *c)
{
        bch_keybuf_init(&c->moving_gc_keys);
        sema_init(&c->moving_in_flight, 64);
}