bcache: Track dirty data by stripe
drivers/md/bcache/request.c (linux-2.6-block.git)
1/*
2 * Main bcache entry point - handle a read or a write request and decide what to
3 * do with it; the make_request functions are called by the block layer.
4 *
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
7 */
8
9#include "bcache.h"
10#include "btree.h"
11#include "debug.h"
12#include "request.h"
 13#include "writeback.h"
14
15#include <linux/cgroup.h>
16#include <linux/module.h>
17#include <linux/hash.h>
18#include <linux/random.h>
19#include "blk-cgroup.h"
20
21#include <trace/events/bcache.h>
22
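/*
 * A rough sketch of how the cutoffs below are used later in this file (all of
 * them compare against c->gc_stats.in_use, roughly the percentage of cache
 * space currently in use):
 *
 * CUTOFF_CACHE_ADD:	check_should_skip() bypasses the cache entirely once
 *			more than 95% of the cache is in use.
 * CUTOFF_CACHE_READA:	cached_dev_cache_miss() stops adding readahead data
 *			above 90%.
 * CUTOFF_WRITEBACK, CUTOFF_WRITEBACK_SYNC:
 *			should_writeback() only allows writeback caching while
 *			utilization is below 50% (75% for REQ_SYNC writes);
 *			beyond that, writes fall back to writethrough.
 */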
23#define CUTOFF_CACHE_ADD 95
24#define CUTOFF_CACHE_READA 90
25#define CUTOFF_WRITEBACK 50
26#define CUTOFF_WRITEBACK_SYNC 75
27
28struct kmem_cache *bch_search_cache;
29
30static void check_should_skip(struct cached_dev *, struct search *);
31
32/* Cgroup interface */
33
34#ifdef CONFIG_CGROUP_BCACHE
35static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
36
37static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
38{
39 struct cgroup_subsys_state *css;
40 return cgroup &&
41 (css = cgroup_subsys_state(cgroup, bcache_subsys_id))
42 ? container_of(css, struct bch_cgroup, css)
43 : &bcache_default_cgroup;
44}
45
46struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
47{
48 struct cgroup_subsys_state *css = bio->bi_css
49 ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
50 : task_subsys_state(current, bcache_subsys_id);
51
52 return css
53 ? container_of(css, struct bch_cgroup, css)
54 : &bcache_default_cgroup;
55}
56
57static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
58 struct file *file,
59 char __user *buf, size_t nbytes, loff_t *ppos)
60{
61 char tmp[1024];
 62 int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
63 cgroup_to_bcache(cgrp)->cache_mode + 1);
64
65 if (len < 0)
66 return len;
67
68 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
69}
70
71static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
72 const char *buf)
73{
 74 int v = bch_read_string_list(buf, bch_cache_modes);
75 if (v < 0)
76 return v;
77
78 cgroup_to_bcache(cgrp)->cache_mode = v - 1;
79 return 0;
80}
81
82static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
83{
84 return cgroup_to_bcache(cgrp)->verify;
85}
86
87static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
88{
89 cgroup_to_bcache(cgrp)->verify = val;
90 return 0;
91}
92
93static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
94{
95 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
96 return atomic_read(&bcachecg->stats.cache_hits);
97}
98
99static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
100{
101 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
102 return atomic_read(&bcachecg->stats.cache_misses);
103}
104
105static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
106 struct cftype *cft)
107{
108 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
109 return atomic_read(&bcachecg->stats.cache_bypass_hits);
110}
111
112static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
113 struct cftype *cft)
114{
115 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
116 return atomic_read(&bcachecg->stats.cache_bypass_misses);
117}
118
119static struct cftype bch_files[] = {
120 {
121 .name = "cache_mode",
122 .read = cache_mode_read,
123 .write_string = cache_mode_write,
124 },
125 {
126 .name = "verify",
127 .read_u64 = bch_verify_read,
128 .write_u64 = bch_verify_write,
129 },
130 {
131 .name = "cache_hits",
132 .read_u64 = bch_cache_hits_read,
133 },
134 {
135 .name = "cache_misses",
136 .read_u64 = bch_cache_misses_read,
137 },
138 {
139 .name = "cache_bypass_hits",
140 .read_u64 = bch_cache_bypass_hits_read,
141 },
142 {
143 .name = "cache_bypass_misses",
144 .read_u64 = bch_cache_bypass_misses_read,
145 },
146 { } /* terminate */
147};
148
149static void init_bch_cgroup(struct bch_cgroup *cg)
150{
151 cg->cache_mode = -1;
152}
153
154static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
155{
156 struct bch_cgroup *cg;
157
158 cg = kzalloc(sizeof(*cg), GFP_KERNEL);
159 if (!cg)
160 return ERR_PTR(-ENOMEM);
161 init_bch_cgroup(cg);
162 return &cg->css;
163}
164
165static void bcachecg_destroy(struct cgroup *cgroup)
166{
167 struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
168 free_css_id(&bcache_subsys, &cg->css);
169 kfree(cg);
170}
171
172struct cgroup_subsys bcache_subsys = {
173 .create = bcachecg_create,
174 .destroy = bcachecg_destroy,
175 .subsys_id = bcache_subsys_id,
176 .name = "bcache",
177 .module = THIS_MODULE,
178};
179EXPORT_SYMBOL_GPL(bcache_subsys);
180#endif
181
182static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
183{
184#ifdef CONFIG_CGROUP_BCACHE
185 int r = bch_bio_to_cgroup(bio)->cache_mode;
186 if (r >= 0)
187 return r;
188#endif
189 return BDEV_CACHE_MODE(&dc->sb);
190}
191
192static bool verify(struct cached_dev *dc, struct bio *bio)
193{
194#ifdef CONFIG_CGROUP_BCACHE
195 if (bch_bio_to_cgroup(bio)->verify)
196 return true;
197#endif
198 return dc->verify;
199}
200
201static void bio_csum(struct bio *bio, struct bkey *k)
202{
203 struct bio_vec *bv;
204 uint64_t csum = 0;
205 int i;
206
207 bio_for_each_segment(bv, bio, i) {
208 void *d = kmap(bv->bv_page) + bv->bv_offset;
 209 csum = bch_crc64_update(csum, d, bv->bv_len);
210 kunmap(bv->bv_page);
211 }
212
213 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
214}
215
216/* Insert data into cache */
217
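/*
 * bio_invalidate() handles the bypass case: instead of writing data into the
 * cache it appends keys with no pointers covering the bio's range (in chunks
 * of at most 1 << 14 sectors).  Inserting those keys overwrites whatever the
 * btree had for that range, so stale cached data can't be returned later.
 * bcache keys record the *end* offset, which is why bi_sector is advanced
 * before each key is built.
 */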
218static void bio_invalidate(struct closure *cl)
219{
220 struct btree_op *op = container_of(cl, struct btree_op, cl);
221 struct bio *bio = op->cache_bio;
222
223 pr_debug("invalidating %i sectors from %llu",
224 bio_sectors(bio), (uint64_t) bio->bi_sector);
225
226 while (bio_sectors(bio)) {
227 unsigned len = min(bio_sectors(bio), 1U << 14);
228
229 if (bch_keylist_realloc(&op->keys, 0, op->c))
230 goto out;
231
232 bio->bi_sector += len;
233 bio->bi_size -= len << 9;
234
235 bch_keylist_add(&op->keys,
236 &KEY(op->inode, bio->bi_sector, len));
237 }
238
239 op->insert_data_done = true;
240 bio_put(bio);
241out:
242 continue_at(cl, bch_journal, bcache_wq);
243}
244
245struct open_bucket {
246 struct list_head list;
247 struct task_struct *last;
248 unsigned sectors_free;
249 BKEY_PADDED(key);
250};
251
252void bch_open_buckets_free(struct cache_set *c)
253{
254 struct open_bucket *b;
255
256 while (!list_empty(&c->data_buckets)) {
257 b = list_first_entry(&c->data_buckets,
258 struct open_bucket, list);
259 list_del(&b->list);
260 kfree(b);
261 }
262}
263
264int bch_open_buckets_alloc(struct cache_set *c)
265{
266 int i;
267
268 spin_lock_init(&c->data_bucket_lock);
269
270 for (i = 0; i < 6; i++) {
271 struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
272 if (!b)
273 return -ENOMEM;
274
275 list_add(&b->list, &c->data_buckets);
276 }
277
278 return 0;
279}
280
281/*
282 * We keep multiple buckets open for writes, and try to segregate different
283 * write streams for better cache utilization: first we look for a bucket where
284 * the last write to it was sequential with the current write, and failing that
285 * we look for a bucket that was last used by the same task.
286 *
 287 * The idea is that if you've got multiple tasks pulling data into the cache at the
288 * same time, you'll get better cache utilization if you try to segregate their
289 * data and preserve locality.
290 *
 291 * For example, say you're starting Firefox at the same time you're copying a
292 * bunch of files. Firefox will likely end up being fairly hot and stay in the
293 * cache awhile, but the data you copied might not be; if you wrote all that
294 * data to the same buckets it'd get invalidated at the same time.
295 *
296 * Both of those tasks will be doing fairly random IO so we can't rely on
297 * detecting sequential IO to segregate their data, but going off of the task
298 * should be a sane heuristic.
299 */
300static struct open_bucket *pick_data_bucket(struct cache_set *c,
301 const struct bkey *search,
302 struct task_struct *task,
303 struct bkey *alloc)
304{
305 struct open_bucket *ret, *ret_task = NULL;
306
307 list_for_each_entry_reverse(ret, &c->data_buckets, list)
308 if (!bkey_cmp(&ret->key, search))
309 goto found;
310 else if (ret->last == task)
311 ret_task = ret;
312
313 ret = ret_task ?: list_first_entry(&c->data_buckets,
314 struct open_bucket, list);
315found:
316 if (!ret->sectors_free && KEY_PTRS(alloc)) {
317 ret->sectors_free = c->sb.bucket_size;
318 bkey_copy(&ret->key, alloc);
319 bkey_init(alloc);
320 }
321
322 if (!ret->sectors_free)
323 ret = NULL;
324
325 return ret;
326}
327
328/*
 329 * Allocates some space in the cache to write to, sets k to point to the newly
 330 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 331 * end of the newly allocated space).
 332 *
 333 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 334 * sectors were actually allocated.
335 *
336 * If s->writeback is true, will not fail.
337 */
338static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
339 struct search *s)
340{
341 struct cache_set *c = s->op.c;
342 struct open_bucket *b;
343 BKEY_PADDED(key) alloc;
344 struct closure cl, *w = NULL;
345 unsigned i;
346
347 if (s->writeback) {
348 closure_init_stack(&cl);
349 w = &cl;
350 }
351
352 /*
353 * We might have to allocate a new bucket, which we can't do with a
354 * spinlock held. So if we have to allocate, we drop the lock, allocate
355 * and then retry. KEY_PTRS() indicates whether alloc points to
356 * allocated bucket(s).
357 */
358
359 bkey_init(&alloc.key);
360 spin_lock(&c->data_bucket_lock);
361
362 while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
363 unsigned watermark = s->op.write_prio
364 ? WATERMARK_MOVINGGC
365 : WATERMARK_NONE;
366
367 spin_unlock(&c->data_bucket_lock);
368
369 if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
370 return false;
371
372 spin_lock(&c->data_bucket_lock);
373 }
374
375 /*
376 * If we had to allocate, we might race and not need to allocate the
 377 * second time we call pick_data_bucket(). If we allocated a bucket but
378 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
379 */
380 if (KEY_PTRS(&alloc.key))
381 __bkey_put(c, &alloc.key);
382
383 for (i = 0; i < KEY_PTRS(&b->key); i++)
384 EBUG_ON(ptr_stale(c, &b->key, i));
385
386 /* Set up the pointer to the space we're allocating: */
387
388 for (i = 0; i < KEY_PTRS(&b->key); i++)
389 k->ptr[i] = b->key.ptr[i];
390
391 sectors = min(sectors, b->sectors_free);
392
393 SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
394 SET_KEY_SIZE(k, sectors);
395 SET_KEY_PTRS(k, KEY_PTRS(&b->key));
396
397 /*
398 * Move b to the end of the lru, and keep track of what this bucket was
399 * last used for:
400 */
401 list_move_tail(&b->list, &c->data_buckets);
402 bkey_copy_key(&b->key, k);
403 b->last = s->task;
404
405 b->sectors_free -= sectors;
406
407 for (i = 0; i < KEY_PTRS(&b->key); i++) {
408 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
409
410 atomic_long_add(sectors,
411 &PTR_CACHE(c, &b->key, i)->sectors_written);
412 }
413
414 if (b->sectors_free < c->sb.block_size)
415 b->sectors_free = 0;
416
417 /*
418 * k takes refcounts on the buckets it points to until it's inserted
 419 * into the btree, but if we're done with this bucket we just transfer
 420 * the refcount that was taken when the bucket was allocated.
421 */
422 if (b->sectors_free)
423 for (i = 0; i < KEY_PTRS(&b->key); i++)
424 atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
425
426 spin_unlock(&c->data_bucket_lock);
427 return true;
428}
429
430static void bch_insert_data_error(struct closure *cl)
431{
432 struct btree_op *op = container_of(cl, struct btree_op, cl);
433
434 /*
435 * Our data write just errored, which means we've got a bunch of keys to
 436 * insert that point to data that wasn't successfully written.
437 *
438 * We don't have to insert those keys but we still have to invalidate
439 * that region of the cache - so, if we just strip off all the pointers
440 * from the keys we'll accomplish just that.
441 */
442
443 struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
444
445 while (src != op->keys.top) {
446 struct bkey *n = bkey_next(src);
447
448 SET_KEY_PTRS(src, 0);
449 bkey_copy(dst, src);
450
451 dst = bkey_next(dst);
452 src = n;
453 }
454
455 op->keys.top = dst;
456
457 bch_journal(cl);
458}
459
460static void bch_insert_data_endio(struct bio *bio, int error)
461{
462 struct closure *cl = bio->bi_private;
463 struct btree_op *op = container_of(cl, struct btree_op, cl);
464 struct search *s = container_of(op, struct search, op);
465
466 if (error) {
467 /* TODO: We could try to recover from this. */
468 if (s->writeback)
469 s->error = error;
470 else if (s->write)
471 set_closure_fn(cl, bch_insert_data_error, bcache_wq);
472 else
473 set_closure_fn(cl, NULL, NULL);
474 }
475
476 bch_bbio_endio(op->c, bio, error, "writing data to cache");
477}
478
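/*
 * The shape of the write loop below: each pass makes room in the keylist
 * (possibly bouncing through bch_journal first), builds a key starting at the
 * bio's current sector, asks bch_alloc_sectors() for as much space as it can
 * get, splits that much off the front of the bio and submits it to the cache
 * device.  For writeback writes the key is marked dirty and the buckets are
 * marked GC_MARK_DIRTY; an optional checksum is appended after the pointers.
 * This repeats until the whole bio has been consumed or allocation fails.
 */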
479static void bch_insert_data_loop(struct closure *cl)
480{
481 struct btree_op *op = container_of(cl, struct btree_op, cl);
482 struct search *s = container_of(op, struct search, op);
483 struct bio *bio = op->cache_bio, *n;
484
485 if (op->skip)
486 return bio_invalidate(cl);
487
488 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
489 set_gc_sectors(op->c);
490 bch_queue_gc(op->c);
491 }
492
493 do {
494 unsigned i;
495 struct bkey *k;
496 struct bio_set *split = s->d
497 ? s->d->bio_split : op->c->bio_split;
498
 499 /* 1 for the device pointer and 1 for the checksum */
500 if (bch_keylist_realloc(&op->keys,
501 1 + (op->csum ? 1 : 0),
502 op->c))
503 continue_at(cl, bch_journal, bcache_wq);
504
505 k = op->keys.top;
506 bkey_init(k);
507 SET_KEY_INODE(k, op->inode);
508 SET_KEY_OFFSET(k, bio->bi_sector);
509
510 if (!bch_alloc_sectors(k, bio_sectors(bio), s))
511 goto err;
512
513 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
514 if (!n) {
515 __bkey_put(op->c, k);
516 continue_at(cl, bch_insert_data_loop, bcache_wq);
517 }
518
519 n->bi_end_io = bch_insert_data_endio;
520 n->bi_private = cl;
521
522 if (s->writeback) {
523 SET_KEY_DIRTY(k, true);
524
525 for (i = 0; i < KEY_PTRS(k); i++)
526 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
527 GC_MARK_DIRTY);
528 }
529
530 SET_KEY_CSUM(k, op->csum);
531 if (KEY_CSUM(k))
532 bio_csum(n, k);
533
 534 trace_bcache_cache_insert(k);
535 bch_keylist_push(&op->keys);
536
537 n->bi_rw |= REQ_WRITE;
538 bch_submit_bbio(n, op->c, k, 0);
539 } while (n != bio);
540
541 op->insert_data_done = true;
542 continue_at(cl, bch_journal, bcache_wq);
543err:
544 /* bch_alloc_sectors() blocks if s->writeback = true */
545 BUG_ON(s->writeback);
546
547 /*
548 * But if it's not a writeback write we'd rather just bail out if
549 * there aren't any buckets ready to write to - it might take awhile and
550 * we might be starving btree writes for gc or something.
551 */
552
553 if (s->write) {
554 /*
555 * Writethrough write: We can't complete the write until we've
556 * updated the index. But we don't want to delay the write while
557 * we wait for buckets to be freed up, so just invalidate the
558 * rest of the write.
559 */
560 op->skip = true;
561 return bio_invalidate(cl);
562 } else {
563 /*
564 * From a cache miss, we can just insert the keys for the data
565 * we have written or bail out if we didn't do anything.
566 */
567 op->insert_data_done = true;
568 bio_put(bio);
569
570 if (!bch_keylist_empty(&op->keys))
571 continue_at(cl, bch_journal, bcache_wq);
572 else
573 closure_return(cl);
574 }
575}
576
577/**
578 * bch_insert_data - stick some data in the cache
579 *
580 * This is the starting point for any data to end up in a cache device; it could
581 * be from a normal write, or a writeback write, or a write to a flash only
582 * volume - it's also used by the moving garbage collector to compact data in
583 * mostly empty buckets.
584 *
585 * It first writes the data to the cache, creating a list of keys to be inserted
586 * (if the data had to be fragmented there will be multiple keys); after the
587 * data is written it calls bch_journal, and after the keys have been added to
588 * the next journal write they're inserted into the btree.
589 *
590 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
591 * and op->inode is used for the key inode.
592 *
593 * If op->skip is true, instead of inserting the data it invalidates the region
594 * of the cache represented by op->cache_bio and op->inode.
595 */
596void bch_insert_data(struct closure *cl)
597{
598 struct btree_op *op = container_of(cl, struct btree_op, cl);
599
600 bch_keylist_init(&op->keys);
601 bio_get(op->cache_bio);
602 bch_insert_data_loop(cl);
603}
604
605void bch_btree_insert_async(struct closure *cl)
606{
607 struct btree_op *op = container_of(cl, struct btree_op, cl);
608 struct search *s = container_of(op, struct search, op);
609
610 if (bch_btree_insert(op, op->c)) {
611 s->error = -ENOMEM;
612 op->insert_data_done = true;
613 }
614
615 if (op->insert_data_done) {
616 bch_keylist_free(&op->keys);
617 closure_return(cl);
618 } else
619 continue_at(cl, bch_insert_data_loop, bcache_wq);
620}
621
622/* Common code for the make_request functions */
623
624static void request_endio(struct bio *bio, int error)
625{
626 struct closure *cl = bio->bi_private;
627
628 if (error) {
629 struct search *s = container_of(cl, struct search, cl);
630 s->error = error;
631 /* Only cache read errors are recoverable */
632 s->recoverable = false;
633 }
634
635 bio_put(bio);
636 closure_put(cl);
637}
638
639void bch_cache_read_endio(struct bio *bio, int error)
640{
641 struct bbio *b = container_of(bio, struct bbio, bio);
642 struct closure *cl = bio->bi_private;
643 struct search *s = container_of(cl, struct search, cl);
644
645 /*
646 * If the bucket was reused while our bio was in flight, we might have
647 * read the wrong data. Set s->error but not error so it doesn't get
648 * counted against the cache device, but we'll still reread the data
649 * from the backing device.
650 */
651
652 if (error)
653 s->error = error;
654 else if (ptr_stale(s->op.c, &b->key, 0)) {
655 atomic_long_inc(&s->op.c->cache_read_races);
656 s->error = -EINTR;
657 }
658
659 bch_bbio_endio(s->op.c, bio, error, "reading from cache");
660}
661
662static void bio_complete(struct search *s)
663{
664 if (s->orig_bio) {
665 int cpu, rw = bio_data_dir(s->orig_bio);
666 unsigned long duration = jiffies - s->start_time;
667
668 cpu = part_stat_lock();
669 part_round_stats(cpu, &s->d->disk->part0);
670 part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
671 part_stat_unlock();
672
673 trace_bcache_request_end(s, s->orig_bio);
674 bio_endio(s->orig_bio, s->error);
675 s->orig_bio = NULL;
676 }
677}
678
679static void do_bio_hook(struct search *s)
680{
681 struct bio *bio = &s->bio.bio;
682 memcpy(bio, s->orig_bio, sizeof(struct bio));
683
684 bio->bi_end_io = request_endio;
685 bio->bi_private = &s->cl;
686 atomic_set(&bio->bi_cnt, 3);
687}
688
689static void search_free(struct closure *cl)
690{
691 struct search *s = container_of(cl, struct search, cl);
692 bio_complete(s);
693
694 if (s->op.cache_bio)
695 bio_put(s->op.cache_bio);
696
697 if (s->unaligned_bvec)
698 mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
699
700 closure_debug_destroy(cl);
701 mempool_free(s, s->d->c->search);
702}
703
704static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
705{
706 struct bio_vec *bv;
707 struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
708 memset(s, 0, offsetof(struct search, op.keys));
709
710 __closure_init(&s->cl, NULL);
711
712 s->op.inode = d->id;
713 s->op.c = d->c;
714 s->d = d;
715 s->op.lock = -1;
716 s->task = current;
717 s->orig_bio = bio;
718 s->write = (bio->bi_rw & REQ_WRITE) != 0;
719 s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0;
720 s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
721 s->recoverable = 1;
722 s->start_time = jiffies;
723 do_bio_hook(s);
724
725 if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
726 bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
727 memcpy(bv, bio_iovec(bio),
728 sizeof(struct bio_vec) * bio_segments(bio));
729
730 s->bio.bio.bi_io_vec = bv;
731 s->unaligned_bvec = 1;
732 }
733
734 return s;
735}
736
737static void btree_read_async(struct closure *cl)
738{
739 struct btree_op *op = container_of(cl, struct btree_op, cl);
740
741 int ret = btree_root(search_recurse, op->c, op);
742
743 if (ret == -EAGAIN)
744 continue_at(cl, btree_read_async, bcache_wq);
745
746 closure_return(cl);
747}
748
749/* Cached devices */
750
751static void cached_dev_bio_complete(struct closure *cl)
752{
753 struct search *s = container_of(cl, struct search, cl);
754 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
755
756 search_free(cl);
757 cached_dev_put(dc);
758}
759
760/* Process reads */
761
762static void cached_dev_read_complete(struct closure *cl)
763{
764 struct search *s = container_of(cl, struct search, cl);
765
766 if (s->op.insert_collision)
767 bch_mark_cache_miss_collision(s);
768
769 if (s->op.cache_bio) {
770 int i;
771 struct bio_vec *bv;
772
773 __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
774 __free_page(bv->bv_page);
775 }
776
777 cached_dev_bio_complete(cl);
778}
779
780static void request_read_error(struct closure *cl)
781{
782 struct search *s = container_of(cl, struct search, cl);
783 struct bio_vec *bv;
784 int i;
785
786 if (s->recoverable) {
787 /* Retry from the backing device: */
788 trace_bcache_read_retry(s->orig_bio);
789
790 s->error = 0;
791 bv = s->bio.bio.bi_io_vec;
792 do_bio_hook(s);
793 s->bio.bio.bi_io_vec = bv;
794
795 if (!s->unaligned_bvec)
796 bio_for_each_segment(bv, s->orig_bio, i)
797 bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
798 else
799 memcpy(s->bio.bio.bi_io_vec,
800 bio_iovec(s->orig_bio),
801 sizeof(struct bio_vec) *
802 bio_segments(s->orig_bio));
803
804 /* XXX: invalidate cache */
805
806 closure_bio_submit(&s->bio.bio, &s->cl, s->d);
807 }
808
809 continue_at(cl, cached_dev_read_complete, NULL);
810}
811
812static void request_read_done(struct closure *cl)
813{
814 struct search *s = container_of(cl, struct search, cl);
815 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
816
817 /*
818 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
819 * contains data ready to be inserted into the cache.
820 *
821 * First, we copy the data we just read from cache_bio's bounce buffers
822 * to the buffers the original bio pointed to:
823 */
824
825 if (s->op.cache_bio) {
826 struct bio_vec *src, *dst;
827 unsigned src_offset, dst_offset, bytes;
828 void *dst_ptr;
829
830 bio_reset(s->op.cache_bio);
831 s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
832 s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
833 s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
 834 bch_bio_map(s->op.cache_bio, NULL);
835
836 src = bio_iovec(s->op.cache_bio);
837 dst = bio_iovec(s->cache_miss);
838 src_offset = src->bv_offset;
839 dst_offset = dst->bv_offset;
840 dst_ptr = kmap(dst->bv_page);
841
842 while (1) {
843 if (dst_offset == dst->bv_offset + dst->bv_len) {
844 kunmap(dst->bv_page);
845 dst++;
846 if (dst == bio_iovec_idx(s->cache_miss,
847 s->cache_miss->bi_vcnt))
848 break;
849
850 dst_offset = dst->bv_offset;
851 dst_ptr = kmap(dst->bv_page);
852 }
853
854 if (src_offset == src->bv_offset + src->bv_len) {
855 src++;
856 if (src == bio_iovec_idx(s->op.cache_bio,
857 s->op.cache_bio->bi_vcnt))
858 BUG();
859
860 src_offset = src->bv_offset;
861 }
862
863 bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
864 src->bv_offset + src->bv_len - src_offset);
865
866 memcpy(dst_ptr + dst_offset,
867 page_address(src->bv_page) + src_offset,
868 bytes);
869
870 src_offset += bytes;
871 dst_offset += bytes;
872 }
873
874 bio_put(s->cache_miss);
875 s->cache_miss = NULL;
876 }
877
878 if (verify(dc, &s->bio.bio) && s->recoverable)
879 bch_data_verify(s);
880
881 bio_complete(s);
882
883 if (s->op.cache_bio &&
884 !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
885 s->op.type = BTREE_REPLACE;
886 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
887 }
888
889 continue_at(cl, cached_dev_read_complete, NULL);
890}
891
892static void request_read_done_bh(struct closure *cl)
893{
894 struct search *s = container_of(cl, struct search, cl);
895 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
896
897 bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
 898 trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
899
900 if (s->error)
901 continue_at_nobarrier(cl, request_read_error, bcache_wq);
902 else if (s->op.cache_bio || verify(dc, &s->bio.bio))
903 continue_at_nobarrier(cl, request_read_done, bcache_wq);
904 else
905 continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
906}
907
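/*
 * Cache miss path: split off the missing range, optionally extend it with
 * readahead (unless the request is itself readahead/metadata or the cache is
 * nearly full), and read it from the backing device into a bounce bio
 * (op->cache_bio).  bch_btree_insert_check_key() plants a check key first so
 * that a racing write makes the later insert fail instead of inserting stale
 * data.  When the read completes, request_read_done() copies the bounce pages
 * back into the original bio and inserts the data into the cache.  If the
 * bounce bio can't be set up, the miss is simply read directly and not cached.
 */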
908static int cached_dev_cache_miss(struct btree *b, struct search *s,
909 struct bio *bio, unsigned sectors)
910{
911 int ret = 0;
912 unsigned reada;
913 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
914 struct bio *miss;
915
916 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
917 if (!miss)
918 return -EAGAIN;
919
920 if (miss == bio)
921 s->op.lookup_done = true;
922
923 miss->bi_end_io = request_endio;
924 miss->bi_private = &s->cl;
925
926 if (s->cache_miss || s->op.skip)
927 goto out_submit;
928
929 if (miss != bio ||
930 (bio->bi_rw & REQ_RAHEAD) ||
931 (bio->bi_rw & REQ_META) ||
932 s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
933 reada = 0;
934 else {
935 reada = min(dc->readahead >> 9,
936 sectors - bio_sectors(miss));
937
938 if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
939 reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
940 }
941
942 s->cache_bio_sectors = bio_sectors(miss) + reada;
943 s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
944 DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
945 dc->disk.bio_split);
946
947 if (!s->op.cache_bio)
948 goto out_submit;
949
950 s->op.cache_bio->bi_sector = miss->bi_sector;
951 s->op.cache_bio->bi_bdev = miss->bi_bdev;
952 s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
953
954 s->op.cache_bio->bi_end_io = request_endio;
955 s->op.cache_bio->bi_private = &s->cl;
956
957 /* btree_search_recurse()'s btree iterator is no good anymore */
958 ret = -EINTR;
959 if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
960 goto out_put;
961
962 bch_bio_map(s->op.cache_bio, NULL);
963 if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
964 goto out_put;
965
966 s->cache_miss = miss;
967 bio_get(s->op.cache_bio);
968
969 closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
970
971 return ret;
972out_put:
973 bio_put(s->op.cache_bio);
974 s->op.cache_bio = NULL;
975out_submit:
976 closure_bio_submit(miss, &s->cl, s->d);
977 return ret;
978}
979
980static void request_read(struct cached_dev *dc, struct search *s)
981{
982 struct closure *cl = &s->cl;
983
984 check_should_skip(dc, s);
985 closure_call(&s->op.cl, btree_read_async, NULL, cl);
986
987 continue_at(cl, request_read_done_bh, NULL);
988}
989
990/* Process writes */
991
992static void cached_dev_write_complete(struct closure *cl)
993{
994 struct search *s = container_of(cl, struct search, cl);
995 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
996
997 up_read_non_owner(&dc->writeback_lock);
998 cached_dev_bio_complete(cl);
999}
1000
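/*
 * Writeback is only worth it while there's room to spare in the cache: below
 * CUTOFF_WRITEBACK (50%) utilization for ordinary writes and
 * CUTOFF_WRITEBACK_SYNC (75%) for REQ_SYNC writes - and never while the
 * device is detaching or the cache mode isn't writeback.
 */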
1001static bool should_writeback(struct cached_dev *dc, struct bio *bio)
1002{
1003 unsigned threshold = (bio->bi_rw & REQ_SYNC)
1004 ? CUTOFF_WRITEBACK_SYNC
1005 : CUTOFF_WRITEBACK;
1006
1007 return !atomic_read(&dc->disk.detaching) &&
1008 cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
1009 dc->disk.c->gc_stats.in_use < threshold;
1010}
1011
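/*
 * request_write() picks one of three paths:
 *
 *   writeback:	   the data goes only to the cache; the key is marked dirty
 *		   and the backing device write is left to the writeback code.
 *   writethrough: the bio is submitted to the backing device and a clone is
 *		   inserted into the cache.
 *   bypass/skip:  the bio goes only to the backing device and the matching
 *		   range of the cache is invalidated via bio_invalidate().
 *
 * A write that overlaps dirty data already queued for writeback is forced
 * down the writeback path so that ordering is preserved.
 */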
1012static void request_write(struct cached_dev *dc, struct search *s)
1013{
1014 struct closure *cl = &s->cl;
1015 struct bio *bio = &s->bio.bio;
1016 struct bkey start, end;
1017 start = KEY(dc->disk.id, bio->bi_sector, 0);
1018 end = KEY(dc->disk.id, bio_end(bio), 0);
1019
1020 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
1021
1022 check_should_skip(dc, s);
1023 down_read_non_owner(&dc->writeback_lock);
1024
1025 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
1026 s->op.skip = false;
1027 s->writeback = true;
1028 }
1029
1030 if (bio->bi_rw & REQ_DISCARD)
1031 goto skip;
1032
1033 if (s->op.skip)
1034 goto skip;
1035
1036 if (should_writeback(dc, s->orig_bio))
1037 s->writeback = true;
1038
1039 trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
1040
1041 if (!s->writeback) {
1042 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
1043 dc->disk.bio_split);
1044
1045 closure_bio_submit(bio, cl, s->d);
1046 } else {
1047 s->op.cache_bio = bio;
 1048 bch_writeback_add(dc);
1049 }
1050out:
1051 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
1052 continue_at(cl, cached_dev_write_complete, NULL);
1053skip:
1054 s->op.skip = true;
1055 s->op.cache_bio = s->orig_bio;
1056 bio_get(s->op.cache_bio);
1057
1058 if ((bio->bi_rw & REQ_DISCARD) &&
1059 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1060 goto out;
1061
1062 closure_bio_submit(bio, cl, s->d);
1063 goto out;
1064}
1065
1066static void request_nodata(struct cached_dev *dc, struct search *s)
1067{
1068 struct closure *cl = &s->cl;
1069 struct bio *bio = &s->bio.bio;
1070
1071 if (bio->bi_rw & REQ_DISCARD) {
1072 request_write(dc, s);
1073 return;
1074 }
1075
1076 if (s->op.flush_journal)
1077 bch_journal_meta(s->op.c, cl);
1078
1079 closure_bio_submit(bio, cl, s->d);
1080
1081 continue_at(cl, cached_dev_bio_complete, NULL);
1082}
1083
1084/* Cached devices - read & write stuff */
1085
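/*
 * A sketch of the congestion heuristic below: c->congested is driven negative
 * (elsewhere, outside this file) when IOs to the cache devices have recently
 * exceeded the configured congested_read/write_threshold_us latencies, and
 * the elapsed-time term lets it decay back toward zero.  While the sum is
 * still negative the cache is considered congested and bch_get_congested()
 * returns a cutoff in sectors - scaled roughly exponentially by
 * fract_exp_two() and jittered by a few random bits.  check_should_skip()
 * bypasses the cache for any stream whose sequential IO size reaches that
 * cutoff, so the more congested the cache, the smaller the cutoff and the
 * more traffic goes straight to the backing device.
 */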
 1086unsigned bch_get_congested(struct cache_set *c)
1087{
1088 int i;
 1089 long rand;
1090
1091 if (!c->congested_read_threshold_us &&
1092 !c->congested_write_threshold_us)
1093 return 0;
1094
1095 i = (local_clock_us() - c->congested_last_us) / 1024;
1096 if (i < 0)
1097 return 0;
1098
1099 i += atomic_read(&c->congested);
1100 if (i >= 0)
1101 return 0;
1102
1103 i += CONGESTED_MAX;
1104
1105 if (i > 0)
1106 i = fract_exp_two(i, 6);
1107
1108 rand = get_random_int();
1109 i -= bitmap_weight(&rand, BITS_PER_LONG);
1110
1111 return i > 0 ? i : 1;
1112}
1113
1114static void add_sequential(struct task_struct *t)
1115{
1116 ewma_add(t->sequential_io_avg,
1117 t->sequential_io, 8, 0);
1118
1119 t->sequential_io = 0;
1120}
1121
 1122static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 1123{
1124 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
1125}
 1126
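/*
 * check_should_skip() decides whether a request should bypass the cache
 * entirely: it skips caching for discards, for devices that are detaching,
 * when the cache is more than CUTOFF_CACHE_ADD percent full, when the cache
 * mode is none (or writearound, for writes), and for IO that isn't
 * block-aligned.  Otherwise it tracks how sequential the stream is - per
 * task, and optionally merged across tasks via the small hash of recent IOs
 * above - and bypasses once the stream exceeds sequential_cutoff or the
 * congestion cutoff from bch_get_congested().
 */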
1127static void check_should_skip(struct cached_dev *dc, struct search *s)
1128{
1129 struct cache_set *c = s->op.c;
1130 struct bio *bio = &s->bio.bio;
 1131 unsigned mode = cache_mode(dc, bio);
 1132 unsigned sectors, congested = bch_get_congested(c);
1133
1134 if (atomic_read(&dc->disk.detaching) ||
1135 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
1136 (bio->bi_rw & REQ_DISCARD))
1137 goto skip;
1138
1139 if (mode == CACHE_MODE_NONE ||
1140 (mode == CACHE_MODE_WRITEAROUND &&
1141 (bio->bi_rw & REQ_WRITE)))
1142 goto skip;
1143
1144 if (bio->bi_sector & (c->sb.block_size - 1) ||
1145 bio_sectors(bio) & (c->sb.block_size - 1)) {
1146 pr_debug("skipping unaligned io");
1147 goto skip;
1148 }
1149
1150 if (!congested && !dc->sequential_cutoff)
1151 goto rescale;
 1152
1153 if (!congested &&
1154 mode == CACHE_MODE_WRITEBACK &&
1155 (bio->bi_rw & REQ_WRITE) &&
1156 (bio->bi_rw & REQ_SYNC))
1157 goto rescale;
1158
1159 if (dc->sequential_merge) {
1160 struct io *i;
1161
1162 spin_lock(&dc->io_lock);
1163
 1164 hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
1165 if (i->last == bio->bi_sector &&
1166 time_before(jiffies, i->jiffies))
1167 goto found;
1168
1169 i = list_first_entry(&dc->io_lru, struct io, lru);
1170
1171 add_sequential(s->task);
1172 i->sequential = 0;
1173found:
1174 if (i->sequential + bio->bi_size > i->sequential)
1175 i->sequential += bio->bi_size;
1176
1177 i->last = bio_end(bio);
1178 i->jiffies = jiffies + msecs_to_jiffies(5000);
1179 s->task->sequential_io = i->sequential;
1180
1181 hlist_del(&i->hash);
 1182 hlist_add_head(&i->hash, iohash(dc, i->last));
1183 list_move_tail(&i->lru, &dc->io_lru);
1184
1185 spin_unlock(&dc->io_lock);
1186 } else {
1187 s->task->sequential_io = bio->bi_size;
1188
1189 add_sequential(s->task);
1190 }
1191
1192 sectors = max(s->task->sequential_io,
1193 s->task->sequential_io_avg) >> 9;
 1194
1195 if (dc->sequential_cutoff &&
1196 sectors >= dc->sequential_cutoff >> 9) {
1197 trace_bcache_bypass_sequential(s->orig_bio);
 1198 goto skip;
1199 }
1200
1201 if (congested && sectors >= congested) {
1202 trace_bcache_bypass_congested(s->orig_bio);
1203 goto skip;
1204 }
1205
1206rescale:
1207 bch_rescale_priorities(c, bio_sectors(bio));
1208 return;
1209skip:
1210 bch_mark_sectors_bypassed(s, bio_sectors(bio));
1211 s->op.skip = true;
1212}
1213
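/*
 * The make_request function for cached devices: account the request in the
 * per-cpu disk stats, remap the bio onto the backing device (adding
 * sb.data_offset, where the cached data starts on the backing device), and
 * dispatch to the nodata/write/read paths above.  If we can't take a
 * reference on the cached_dev (e.g. it's being detached), the bio is passed
 * straight through to the backing device instead.
 */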
1214static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
1215{
1216 struct search *s;
1217 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1218 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1219 int cpu, rw = bio_data_dir(bio);
1220
1221 cpu = part_stat_lock();
1222 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1223 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1224 part_stat_unlock();
1225
1226 bio->bi_bdev = dc->bdev;
 1227 bio->bi_sector += dc->sb.data_offset;
1228
1229 if (cached_dev_get(dc)) {
1230 s = search_alloc(bio, d);
1231 trace_bcache_request_start(s, bio);
1232
1233 if (!bio_has_data(bio))
1234 request_nodata(dc, s);
1235 else if (rw)
1236 request_write(dc, s);
1237 else
1238 request_read(dc, s);
1239 } else {
1240 if ((bio->bi_rw & REQ_DISCARD) &&
1241 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1242 bio_endio(bio, 0);
1243 else
1244 bch_generic_make_request(bio, &d->bio_split_hook);
1245 }
1246}
1247
1248static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1249 unsigned int cmd, unsigned long arg)
1250{
1251 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1252 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1253}
1254
1255static int cached_dev_congested(void *data, int bits)
1256{
1257 struct bcache_device *d = data;
1258 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1259 struct request_queue *q = bdev_get_queue(dc->bdev);
1260 int ret = 0;
1261
1262 if (bdi_congested(&q->backing_dev_info, bits))
1263 return 1;
1264
1265 if (cached_dev_get(dc)) {
1266 unsigned i;
1267 struct cache *ca;
1268
1269 for_each_cache(ca, d->c, i) {
1270 q = bdev_get_queue(ca->bdev);
1271 ret |= bdi_congested(&q->backing_dev_info, bits);
1272 }
1273
1274 cached_dev_put(dc);
1275 }
1276
1277 return ret;
1278}
1279
1280void bch_cached_dev_request_init(struct cached_dev *dc)
1281{
1282 struct gendisk *g = dc->disk.disk;
1283
1284 g->queue->make_request_fn = cached_dev_make_request;
1285 g->queue->backing_dev_info.congested_fn = cached_dev_congested;
1286 dc->disk.cache_miss = cached_dev_cache_miss;
1287 dc->disk.ioctl = cached_dev_ioctl;
1288}
1289
1290/* Flash backed devices */
1291
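/*
 * Flash-only volumes have no backing device - all of their data lives in the
 * cache set.  A read that misses is therefore just zero filled below, and
 * writes always go through bch_insert_data() as writeback (see
 * flash_dev_make_request()).
 */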
1292static int flash_dev_cache_miss(struct btree *b, struct search *s,
1293 struct bio *bio, unsigned sectors)
1294{
1295 /* Zero fill bio */
1296
1297 while (bio->bi_idx != bio->bi_vcnt) {
1298 struct bio_vec *bv = bio_iovec(bio);
1299 unsigned j = min(bv->bv_len >> 9, sectors);
1300
1301 void *p = kmap(bv->bv_page);
1302 memset(p + bv->bv_offset, 0, j << 9);
1303 kunmap(bv->bv_page);
1304
1305 bv->bv_len -= j << 9;
1306 bv->bv_offset += j << 9;
1307
1308 if (bv->bv_len)
1309 return 0;
1310
1311 bio->bi_sector += j;
1312 bio->bi_size -= j << 9;
1313
1314 bio->bi_idx++;
1315 sectors -= j;
1316 }
1317
1318 s->op.lookup_done = true;
1319
1320 return 0;
1321}
1322
1323static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1324{
1325 struct search *s;
1326 struct closure *cl;
1327 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1328 int cpu, rw = bio_data_dir(bio);
1329
1330 cpu = part_stat_lock();
1331 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1332 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1333 part_stat_unlock();
1334
1335 s = search_alloc(bio, d);
1336 cl = &s->cl;
1337 bio = &s->bio.bio;
1338
1339 trace_bcache_request_start(s, bio);
1340
1341 if (bio_has_data(bio) && !rw) {
1342 closure_call(&s->op.cl, btree_read_async, NULL, cl);
1343 } else if (bio_has_data(bio) || s->op.skip) {
1344 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
1345 &KEY(d->id, bio->bi_sector, 0),
1346 &KEY(d->id, bio_end(bio), 0));
1347
1348 s->writeback = true;
1349 s->op.cache_bio = bio;
1350
1351 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
1352 } else {
1353 /* No data - probably a cache flush */
1354 if (s->op.flush_journal)
1355 bch_journal_meta(s->op.c, cl);
1356 }
1357
1358 continue_at(cl, search_free, NULL);
1359}
1360
1361static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1362 unsigned int cmd, unsigned long arg)
1363{
1364 return -ENOTTY;
1365}
1366
1367static int flash_dev_congested(void *data, int bits)
1368{
1369 struct bcache_device *d = data;
1370 struct request_queue *q;
1371 struct cache *ca;
1372 unsigned i;
1373 int ret = 0;
1374
1375 for_each_cache(ca, d->c, i) {
1376 q = bdev_get_queue(ca->bdev);
1377 ret |= bdi_congested(&q->backing_dev_info, bits);
1378 }
1379
1380 return ret;
1381}
1382
1383void bch_flash_dev_request_init(struct bcache_device *d)
1384{
1385 struct gendisk *g = d->disk;
1386
1387 g->queue->make_request_fn = flash_dev_make_request;
1388 g->queue->backing_dev_info.congested_fn = flash_dev_congested;
1389 d->cache_miss = flash_dev_cache_miss;
1390 d->ioctl = flash_dev_ioctl;
1391}
1392
1393void bch_request_exit(void)
1394{
1395#ifdef CONFIG_CGROUP_BCACHE
1396 cgroup_unload_subsys(&bcache_subsys);
1397#endif
1398 if (bch_search_cache)
1399 kmem_cache_destroy(bch_search_cache);
1400}
1401
1402int __init bch_request_init(void)
1403{
1404 bch_search_cache = KMEM_CACHE(search, 0);
1405 if (!bch_search_cache)
1406 return -ENOMEM;
1407
1408#ifdef CONFIG_CGROUP_BCACHE
1409 cgroup_load_subsys(&bcache_subsys);
1410 init_bch_cgroup(&bcache_default_cgroup);
1411
1412 cgroup_add_cftypes(&bcache_subsys, bch_files);
1413#endif
1414 return 0;
1415}