bcache: FUA fixes
[linux-2.6-block.git] drivers/md/bcache/request.c
1 /*
2  * Main bcache entry point - handle a read or a write request and decide what to
3  * do with it; the make_request functions are called by the block layer.
4  *
5  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6  * Copyright 2012 Google, Inc.
7  */
8
9 #include "bcache.h"
10 #include "btree.h"
11 #include "debug.h"
12 #include "request.h"
13 #include "writeback.h"
14
15 #include <linux/cgroup.h>
16 #include <linux/module.h>
17 #include <linux/hash.h>
18 #include <linux/random.h>
19 #include "blk-cgroup.h"
20
21 #include <trace/events/bcache.h>
22
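/*
 * Cache occupancy cutoffs, in percent of buckets in use (gc_stats.in_use):
 * past CUTOFF_CACHE_ADD new data is no longer added to the cache, and past
 * CUTOFF_CACHE_READA cache misses are no longer extended with readahead.
 */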
23 #define CUTOFF_CACHE_ADD        95
24 #define CUTOFF_CACHE_READA      90
25
26 struct kmem_cache *bch_search_cache;
27
28 static void check_should_skip(struct cached_dev *, struct search *);
29
30 /* Cgroup interface */
31
32 #ifdef CONFIG_CGROUP_BCACHE
33 static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
34
35 static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
36 {
37         struct cgroup_subsys_state *css;
38         return cgroup &&
39                 (css = cgroup_subsys_state(cgroup, bcache_subsys_id))
40                 ? container_of(css, struct bch_cgroup, css)
41                 : &bcache_default_cgroup;
42 }
43
44 struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
45 {
46         struct cgroup_subsys_state *css = bio->bi_css
47                 ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
48                 : task_subsys_state(current, bcache_subsys_id);
49
50         return css
51                 ? container_of(css, struct bch_cgroup, css)
52                 : &bcache_default_cgroup;
53 }
54
55 static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
56                         struct file *file,
57                         char __user *buf, size_t nbytes, loff_t *ppos)
58 {
59         char tmp[1024];
60         int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
61                                           cgroup_to_bcache(cgrp)->cache_mode + 1);
62
63         if (len < 0)
64                 return len;
65
66         return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
67 }
68
69 static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
70                             const char *buf)
71 {
72         int v = bch_read_string_list(buf, bch_cache_modes);
73         if (v < 0)
74                 return v;
75
76         cgroup_to_bcache(cgrp)->cache_mode = v - 1;
77         return 0;
78 }
79
80 static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
81 {
82         return cgroup_to_bcache(cgrp)->verify;
83 }
84
85 static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
86 {
87         cgroup_to_bcache(cgrp)->verify = val;
88         return 0;
89 }
90
91 static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
92 {
93         struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
94         return atomic_read(&bcachecg->stats.cache_hits);
95 }
96
97 static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
98 {
99         struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
100         return atomic_read(&bcachecg->stats.cache_misses);
101 }
102
103 static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
104                                          struct cftype *cft)
105 {
106         struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
107         return atomic_read(&bcachecg->stats.cache_bypass_hits);
108 }
109
110 static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
111                                            struct cftype *cft)
112 {
113         struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
114         return atomic_read(&bcachecg->stats.cache_bypass_misses);
115 }
116
117 static struct cftype bch_files[] = {
118         {
119                 .name           = "cache_mode",
120                 .read           = cache_mode_read,
121                 .write_string   = cache_mode_write,
122         },
123         {
124                 .name           = "verify",
125                 .read_u64       = bch_verify_read,
126                 .write_u64      = bch_verify_write,
127         },
128         {
129                 .name           = "cache_hits",
130                 .read_u64       = bch_cache_hits_read,
131         },
132         {
133                 .name           = "cache_misses",
134                 .read_u64       = bch_cache_misses_read,
135         },
136         {
137                 .name           = "cache_bypass_hits",
138                 .read_u64       = bch_cache_bypass_hits_read,
139         },
140         {
141                 .name           = "cache_bypass_misses",
142                 .read_u64       = bch_cache_bypass_misses_read,
143         },
144         { }     /* terminate */
145 };
146
147 static void init_bch_cgroup(struct bch_cgroup *cg)
148 {
149         cg->cache_mode = -1;
150 }
151
152 static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
153 {
154         struct bch_cgroup *cg;
155
156         cg = kzalloc(sizeof(*cg), GFP_KERNEL);
157         if (!cg)
158                 return ERR_PTR(-ENOMEM);
159         init_bch_cgroup(cg);
160         return &cg->css;
161 }
162
163 static void bcachecg_destroy(struct cgroup *cgroup)
164 {
165         struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
166         free_css_id(&bcache_subsys, &cg->css);
167         kfree(cg);
168 }
169
170 struct cgroup_subsys bcache_subsys = {
171         .create         = bcachecg_create,
172         .destroy        = bcachecg_destroy,
173         .subsys_id      = bcache_subsys_id,
174         .name           = "bcache",
175         .module         = THIS_MODULE,
176 };
177 EXPORT_SYMBOL_GPL(bcache_subsys);
178 #endif
179
180 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
181 {
182 #ifdef CONFIG_CGROUP_BCACHE
183         int r = bch_bio_to_cgroup(bio)->cache_mode;
184         if (r >= 0)
185                 return r;
186 #endif
187         return BDEV_CACHE_MODE(&dc->sb);
188 }
189
190 static bool verify(struct cached_dev *dc, struct bio *bio)
191 {
192 #ifdef CONFIG_CGROUP_BCACHE
193         if (bch_bio_to_cgroup(bio)->verify)
194                 return true;
195 #endif
196         return dc->verify;
197 }
198
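/*
 * Checksum the data in @bio and stash the result in @k, in the slot right
 * after the key's last pointer (with the top bit masked off).
 */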
199 static void bio_csum(struct bio *bio, struct bkey *k)
200 {
201         struct bio_vec *bv;
202         uint64_t csum = 0;
203         int i;
204
205         bio_for_each_segment(bv, bio, i) {
206                 void *d = kmap(bv->bv_page) + bv->bv_offset;
207                 csum = bch_crc64_update(csum, d, bv->bv_len);
208                 kunmap(bv->bv_page);
209         }
210
211         k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
212 }
213
214 /* Insert data into cache */
215
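/*
 * Invalidate the region covered by op->cache_bio: walk the bio in chunks of at
 * most 1 << 14 sectors, emitting keys with no pointers so any stale cached
 * data for the range is overridden in the index, then continue on to the
 * journal.
 */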
216 static void bio_invalidate(struct closure *cl)
217 {
218         struct btree_op *op = container_of(cl, struct btree_op, cl);
219         struct bio *bio = op->cache_bio;
220
221         pr_debug("invalidating %i sectors from %llu",
222                  bio_sectors(bio), (uint64_t) bio->bi_sector);
223
224         while (bio_sectors(bio)) {
225                 unsigned len = min(bio_sectors(bio), 1U << 14);
226
227                 if (bch_keylist_realloc(&op->keys, 0, op->c))
228                         goto out;
229
230                 bio->bi_sector  += len;
231                 bio->bi_size    -= len << 9;
232
233                 bch_keylist_add(&op->keys,
234                                 &KEY(op->inode, bio->bi_sector, len));
235         }
236
237         op->insert_data_done = true;
238         bio_put(bio);
239 out:
240         continue_at(cl, bch_journal, bcache_wq);
241 }
242
243 struct open_bucket {
244         struct list_head        list;
245         struct task_struct      *last;
246         unsigned                sectors_free;
247         BKEY_PADDED(key);
248 };
249
250 void bch_open_buckets_free(struct cache_set *c)
251 {
252         struct open_bucket *b;
253
254         while (!list_empty(&c->data_buckets)) {
255                 b = list_first_entry(&c->data_buckets,
256                                      struct open_bucket, list);
257                 list_del(&b->list);
258                 kfree(b);
259         }
260 }
261
262 int bch_open_buckets_alloc(struct cache_set *c)
263 {
264         int i;
265
266         spin_lock_init(&c->data_bucket_lock);
267
268         for (i = 0; i < 6; i++) {
269                 struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
270                 if (!b)
271                         return -ENOMEM;
272
273                 list_add(&b->list, &c->data_buckets);
274         }
275
276         return 0;
277 }
278
279 /*
280  * We keep multiple buckets open for writes, and try to segregate different
281  * write streams for better cache utilization: first we look for a bucket where
282  * the last write to it was sequential with the current write, and failing that
283  * we look for a bucket that was last used by the same task.
284  *
285  * The idea is that if you've got multiple tasks pulling data into the cache at the
286  * same time, you'll get better cache utilization if you try to segregate their
287  * data and preserve locality.
288  *
289  * For example, say you've started Firefox at the same time you're copying a
290  * bunch of files. Firefox will likely end up being fairly hot and stay in the
291  * cache awhile, but the data you copied might not be; if you wrote all that
292  * data to the same buckets it'd get invalidated at the same time.
293  *
294  * Both of those tasks will be doing fairly random IO so we can't rely on
295  * detecting sequential IO to segregate their data, but going off of the task
296  * should be a sane heuristic.
297  */
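/*
 * Lookup order: first a bucket whose key ends exactly where this write starts
 * (i.e. the write is sequential with the previous one), then a bucket last
 * used by the same task, then the least recently used bucket. If the bucket we
 * end up with has no space left, it's refilled from @alloc (when the caller
 * has one to give us).
 */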
298 static struct open_bucket *pick_data_bucket(struct cache_set *c,
299                                             const struct bkey *search,
300                                             struct task_struct *task,
301                                             struct bkey *alloc)
302 {
303         struct open_bucket *ret, *ret_task = NULL;
304
305         list_for_each_entry_reverse(ret, &c->data_buckets, list)
306                 if (!bkey_cmp(&ret->key, search))
307                         goto found;
308                 else if (ret->last == task)
309                         ret_task = ret;
310
311         ret = ret_task ?: list_first_entry(&c->data_buckets,
312                                            struct open_bucket, list);
313 found:
314         if (!ret->sectors_free && KEY_PTRS(alloc)) {
315                 ret->sectors_free = c->sb.bucket_size;
316                 bkey_copy(&ret->key, alloc);
317                 bkey_init(alloc);
318         }
319
320         if (!ret->sectors_free)
321                 ret = NULL;
322
323         return ret;
324 }
325
326 /*
327  * Allocates some space in the cache to write to, sets k to point to the newly
328  * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
329  * end of the newly allocated space).
330  *
331  * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
332  * sectors were actually allocated.
333  *
334  * If s->writeback is true, will not fail.
335  */
336 static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
337                               struct search *s)
338 {
339         struct cache_set *c = s->op.c;
340         struct open_bucket *b;
341         BKEY_PADDED(key) alloc;
342         struct closure cl, *w = NULL;
343         unsigned i;
344
345         if (s->writeback) {
346                 closure_init_stack(&cl);
347                 w = &cl;
348         }
349
350         /*
351          * We might have to allocate a new bucket, which we can't do with a
352          * spinlock held. So if we have to allocate, we drop the lock, allocate
353          * and then retry. KEY_PTRS() indicates whether alloc points to
354          * allocated bucket(s).
355          */
356
357         bkey_init(&alloc.key);
358         spin_lock(&c->data_bucket_lock);
359
360         while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
361                 unsigned watermark = s->op.write_prio
362                         ? WATERMARK_MOVINGGC
363                         : WATERMARK_NONE;
364
365                 spin_unlock(&c->data_bucket_lock);
366
367                 if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
368                         return false;
369
370                 spin_lock(&c->data_bucket_lock);
371         }
372
373         /*
374          * If we had to allocate, we might race and not need to allocate the
375          * second time we call pick_data_bucket(). If we allocated a bucket but
376          * didn't use it, drop the refcount bch_bucket_alloc_set() took:
377          */
378         if (KEY_PTRS(&alloc.key))
379                 __bkey_put(c, &alloc.key);
380
381         for (i = 0; i < KEY_PTRS(&b->key); i++)
382                 EBUG_ON(ptr_stale(c, &b->key, i));
383
384         /* Set up the pointer to the space we're allocating: */
385
386         for (i = 0; i < KEY_PTRS(&b->key); i++)
387                 k->ptr[i] = b->key.ptr[i];
388
389         sectors = min(sectors, b->sectors_free);
390
391         SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
392         SET_KEY_SIZE(k, sectors);
393         SET_KEY_PTRS(k, KEY_PTRS(&b->key));
394
395         /*
396          * Move b to the end of the lru, and keep track of what this bucket was
397          * last used for:
398          */
399         list_move_tail(&b->list, &c->data_buckets);
400         bkey_copy_key(&b->key, k);
401         b->last = s->task;
402
403         b->sectors_free -= sectors;
404
405         for (i = 0; i < KEY_PTRS(&b->key); i++) {
406                 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
407
408                 atomic_long_add(sectors,
409                                 &PTR_CACHE(c, &b->key, i)->sectors_written);
410         }
411
412         if (b->sectors_free < c->sb.block_size)
413                 b->sectors_free = 0;
414
415         /*
416          * k takes refcounts on the buckets it points to until it's inserted
417          * into the btree, but if we're done with this bucket we just transfer
418          * the refcount the open bucket already holds.
419          */
420         if (b->sectors_free)
421                 for (i = 0; i < KEY_PTRS(&b->key); i++)
422                         atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
423
424         spin_unlock(&c->data_bucket_lock);
425         return true;
426 }
427
428 static void bch_insert_data_error(struct closure *cl)
429 {
430         struct btree_op *op = container_of(cl, struct btree_op, cl);
431
432         /*
433          * Our data write just errored, which means we've got a bunch of keys to
434          * insert that point to data that wasn't successfully written.
435          *
436          * We don't have to insert those keys but we still have to invalidate
437          * that region of the cache - so, if we just strip off all the pointers
438          * from the keys we'll accomplish just that.
439          */
440
441         struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
442
443         while (src != op->keys.top) {
444                 struct bkey *n = bkey_next(src);
445
446                 SET_KEY_PTRS(src, 0);
447                 bkey_copy(dst, src);
448
449                 dst = bkey_next(dst);
450                 src = n;
451         }
452
453         op->keys.top = dst;
454
455         bch_journal(cl);
456 }
457
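/*
 * Completion for a data write to the cache. On error: a writeback write fails
 * the whole request, a writethrough write is rerouted through
 * bch_insert_data_error() so its keys get their pointers stripped, and a
 * cache miss insert is simply abandoned.
 */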
458 static void bch_insert_data_endio(struct bio *bio, int error)
459 {
460         struct closure *cl = bio->bi_private;
461         struct btree_op *op = container_of(cl, struct btree_op, cl);
462         struct search *s = container_of(op, struct search, op);
463
464         if (error) {
465                 /* TODO: We could try to recover from this. */
466                 if (s->writeback)
467                         s->error = error;
468                 else if (s->write)
469                         set_closure_fn(cl, bch_insert_data_error, bcache_wq);
470                 else
471                         set_closure_fn(cl, NULL, NULL);
472         }
473
474         bch_bbio_endio(op->c, bio, error, "writing data to cache");
475 }
476
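/*
 * Main insert loop: allocate some cache space, carve off as much of the bio as
 * fits (bch_bio_split()), point a fresh key at it and submit that fragment,
 * repeating until the whole bio has been written out or we have to bounce
 * through the journal to make room in the keylist.
 */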
477 static void bch_insert_data_loop(struct closure *cl)
478 {
479         struct btree_op *op = container_of(cl, struct btree_op, cl);
480         struct search *s = container_of(op, struct search, op);
481         struct bio *bio = op->cache_bio, *n;
482
483         if (op->skip)
484                 return bio_invalidate(cl);
485
486         if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
487                 set_gc_sectors(op->c);
488                 bch_queue_gc(op->c);
489         }
490
491         do {
492                 unsigned i;
493                 struct bkey *k;
494                 struct bio_set *split = s->d
495                         ? s->d->bio_split : op->c->bio_split;
496
497                 /* 1 for the device pointer and 1 for the checksum */
498                 if (bch_keylist_realloc(&op->keys,
499                                         1 + (op->csum ? 1 : 0),
500                                         op->c))
501                         continue_at(cl, bch_journal, bcache_wq);
502
503                 k = op->keys.top;
504                 bkey_init(k);
505                 SET_KEY_INODE(k, op->inode);
506                 SET_KEY_OFFSET(k, bio->bi_sector);
507
508                 if (!bch_alloc_sectors(k, bio_sectors(bio), s))
509                         goto err;
510
511                 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
512                 if (!n) {
513                         __bkey_put(op->c, k);
514                         continue_at(cl, bch_insert_data_loop, bcache_wq);
515                 }
516
517                 n->bi_end_io    = bch_insert_data_endio;
518                 n->bi_private   = cl;
519
520                 if (s->writeback) {
521                         SET_KEY_DIRTY(k, true);
522
523                         for (i = 0; i < KEY_PTRS(k); i++)
524                                 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
525                                             GC_MARK_DIRTY);
526                 }
527
528                 SET_KEY_CSUM(k, op->csum);
529                 if (KEY_CSUM(k))
530                         bio_csum(n, k);
531
532                 trace_bcache_cache_insert(k);
533                 bch_keylist_push(&op->keys);
534
535                 n->bi_rw |= REQ_WRITE;
536                 bch_submit_bbio(n, op->c, k, 0);
537         } while (n != bio);
538
539         op->insert_data_done = true;
540         continue_at(cl, bch_journal, bcache_wq);
541 err:
542         /* bch_alloc_sectors() blocks if s->writeback = true */
543         BUG_ON(s->writeback);
544
545         /*
546          * But if it's not a writeback write we'd rather just bail out if
547          * there aren't any buckets ready to write to - it might take a while and
548          * we might be starving btree writes for gc or something.
549          */
550
551         if (s->write) {
552                 /*
553                  * Writethrough write: We can't complete the write until we've
554                  * updated the index. But we don't want to delay the write while
555                  * we wait for buckets to be freed up, so just invalidate the
556                  * rest of the write.
557                  */
558                 op->skip = true;
559                 return bio_invalidate(cl);
560         } else {
561                 /*
562                  * From a cache miss, we can just insert the keys for the data
563                  * we have written or bail out if we didn't do anything.
564                  */
565                 op->insert_data_done = true;
566                 bio_put(bio);
567
568                 if (!bch_keylist_empty(&op->keys))
569                         continue_at(cl, bch_journal, bcache_wq);
570                 else
571                         closure_return(cl);
572         }
573 }
574
575 /**
576  * bch_insert_data - stick some data in the cache
577  *
578  * This is the starting point for any data to end up in a cache device; it could
579  * be from a normal write, or a writeback write, or a write to a flash only
580  * volume - it's also used by the moving garbage collector to compact data in
581  * mostly empty buckets.
582  *
583  * It first writes the data to the cache, creating a list of keys to be inserted
584  * (if the data had to be fragmented there will be multiple keys); after the
585  * data is written it calls bch_journal, and after the keys have been added to
586  * the next journal write they're inserted into the btree.
587  *
588  * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
589  * and op->inode is used for the key inode.
590  *
591  * If op->skip is true, instead of inserting the data it invalidates the region
592  * of the cache represented by op->cache_bio and op->inode.
593  */
594 void bch_insert_data(struct closure *cl)
595 {
596         struct btree_op *op = container_of(cl, struct btree_op, cl);
597
598         bch_keylist_init(&op->keys);
599         bio_get(op->cache_bio);
600         bch_insert_data_loop(cl);
601 }
602
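/*
 * Runs once the keys have been handed to the journal: insert them into the
 * btree, then either finish up or loop back to bch_insert_data_loop() if
 * there's still data left to write.
 */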
603 void bch_btree_insert_async(struct closure *cl)
604 {
605         struct btree_op *op = container_of(cl, struct btree_op, cl);
606         struct search *s = container_of(op, struct search, op);
607
608         if (bch_btree_insert(op, op->c)) {
609                 s->error                = -ENOMEM;
610                 op->insert_data_done    = true;
611         }
612
613         if (op->insert_data_done) {
614                 bch_keylist_free(&op->keys);
615                 closure_return(cl);
616         } else
617                 continue_at(cl, bch_insert_data_loop, bcache_wq);
618 }
619
620 /* Common code for the make_request functions */
621
622 static void request_endio(struct bio *bio, int error)
623 {
624         struct closure *cl = bio->bi_private;
625
626         if (error) {
627                 struct search *s = container_of(cl, struct search, cl);
628                 s->error = error;
629                 /* Only cache read errors are recoverable */
630                 s->recoverable = false;
631         }
632
633         bio_put(bio);
634         closure_put(cl);
635 }
636
637 void bch_cache_read_endio(struct bio *bio, int error)
638 {
639         struct bbio *b = container_of(bio, struct bbio, bio);
640         struct closure *cl = bio->bi_private;
641         struct search *s = container_of(cl, struct search, cl);
642
643         /*
644          * If the bucket was reused while our bio was in flight, we might have
645          * read the wrong data. Set s->error but leave error alone, so the
646          * failure doesn't get counted against the cache device; we'll still
647          * reread the data from the backing device.
648          */
649
650         if (error)
651                 s->error = error;
652         else if (ptr_stale(s->op.c, &b->key, 0)) {
653                 atomic_long_inc(&s->op.c->cache_read_races);
654                 s->error = -EINTR;
655         }
656
657         bch_bbio_endio(s->op.c, bio, error, "reading from cache");
658 }
659
660 static void bio_complete(struct search *s)
661 {
662         if (s->orig_bio) {
663                 int cpu, rw = bio_data_dir(s->orig_bio);
664                 unsigned long duration = jiffies - s->start_time;
665
666                 cpu = part_stat_lock();
667                 part_round_stats(cpu, &s->d->disk->part0);
668                 part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
669                 part_stat_unlock();
670
671                 trace_bcache_request_end(s, s->orig_bio);
672                 bio_endio(s->orig_bio, s->error);
673                 s->orig_bio = NULL;
674         }
675 }
676
677 static void do_bio_hook(struct search *s)
678 {
679         struct bio *bio = &s->bio.bio;
680         memcpy(bio, s->orig_bio, sizeof(struct bio));
681
682         bio->bi_end_io          = request_endio;
683         bio->bi_private         = &s->cl;
684         atomic_set(&bio->bi_cnt, 3);
685 }
686
687 static void search_free(struct closure *cl)
688 {
689         struct search *s = container_of(cl, struct search, cl);
690         bio_complete(s);
691
692         if (s->op.cache_bio)
693                 bio_put(s->op.cache_bio);
694
695         if (s->unaligned_bvec)
696                 mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
697
698         closure_debug_destroy(cl);
699         mempool_free(s, s->d->c->search);
700 }
701
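/*
 * Allocate and initialize a struct search for @bio: the original bio is cloned
 * into s->bio, the write/flush/discard flags are derived from bi_rw, and if
 * the bvecs aren't page aligned the iovec is copied so the clone doesn't share
 * (and modify) the original bio's bio_vec array.
 */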
702 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
703 {
704         struct bio_vec *bv;
705         struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
706         memset(s, 0, offsetof(struct search, op.keys));
707
708         __closure_init(&s->cl, NULL);
709
710         s->op.inode             = d->id;
711         s->op.c                 = d->c;
712         s->d                    = d;
713         s->op.lock              = -1;
714         s->task                 = current;
715         s->orig_bio             = bio;
716         s->write                = (bio->bi_rw & REQ_WRITE) != 0;
717         s->op.flush_journal     = (bio->bi_rw & REQ_FLUSH) != 0;
718         s->op.skip              = (bio->bi_rw & REQ_DISCARD) != 0;
719         s->recoverable          = 1;
720         s->start_time           = jiffies;
721         do_bio_hook(s);
722
723         if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
724                 bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
725                 memcpy(bv, bio_iovec(bio),
726                        sizeof(struct bio_vec) * bio_segments(bio));
727
728                 s->bio.bio.bi_io_vec    = bv;
729                 s->unaligned_bvec       = 1;
730         }
731
732         return s;
733 }
734
735 static void btree_read_async(struct closure *cl)
736 {
737         struct btree_op *op = container_of(cl, struct btree_op, cl);
738
739         int ret = btree_root(search_recurse, op->c, op);
740
741         if (ret == -EAGAIN)
742                 continue_at(cl, btree_read_async, bcache_wq);
743
744         closure_return(cl);
745 }
746
747 /* Cached devices */
748
749 static void cached_dev_bio_complete(struct closure *cl)
750 {
751         struct search *s = container_of(cl, struct search, cl);
752         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
753
754         search_free(cl);
755         cached_dev_put(dc);
756 }
757
758 /* Process reads */
759
760 static void cached_dev_read_complete(struct closure *cl)
761 {
762         struct search *s = container_of(cl, struct search, cl);
763
764         if (s->op.insert_collision)
765                 bch_mark_cache_miss_collision(s);
766
767         if (s->op.cache_bio) {
768                 int i;
769                 struct bio_vec *bv;
770
771                 __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
772                         __free_page(bv->bv_page);
773         }
774
775         cached_dev_bio_complete(cl);
776 }
777
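/*
 * The read failed somewhere. If the search is still marked recoverable (the
 * error came from the cache device or a stale pointer race rather than from
 * the backing device), rebuild the bio and retry the whole read from the
 * backing device.
 */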
778 static void request_read_error(struct closure *cl)
779 {
780         struct search *s = container_of(cl, struct search, cl);
781         struct bio_vec *bv;
782         int i;
783
784         if (s->recoverable) {
785                 /* Retry from the backing device: */
786                 trace_bcache_read_retry(s->orig_bio);
787
788                 s->error = 0;
789                 bv = s->bio.bio.bi_io_vec;
790                 do_bio_hook(s);
791                 s->bio.bio.bi_io_vec = bv;
792
793                 if (!s->unaligned_bvec)
794                         bio_for_each_segment(bv, s->orig_bio, i)
795                                 bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
796                 else
797                         memcpy(s->bio.bio.bi_io_vec,
798                                bio_iovec(s->orig_bio),
799                                sizeof(struct bio_vec) *
800                                bio_segments(s->orig_bio));
801
802                 /* XXX: invalidate cache */
803
804                 closure_bio_submit(&s->bio.bio, &s->cl, s->d);
805         }
806
807         continue_at(cl, cached_dev_read_complete, NULL);
808 }
809
810 static void request_read_done(struct closure *cl)
811 {
812         struct search *s = container_of(cl, struct search, cl);
813         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
814
815         /*
816          * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio now
817          * contains data ready to be inserted into the cache.
818          *
819          * First, we copy the data we just read from cache_bio's bounce buffers
820          * to the buffers the original bio pointed to:
821          */
822
823         if (s->op.cache_bio) {
824                 struct bio_vec *src, *dst;
825                 unsigned src_offset, dst_offset, bytes;
826                 void *dst_ptr;
827
828                 bio_reset(s->op.cache_bio);
829                 s->op.cache_bio->bi_sector      = s->cache_miss->bi_sector;
830                 s->op.cache_bio->bi_bdev        = s->cache_miss->bi_bdev;
831                 s->op.cache_bio->bi_size        = s->cache_bio_sectors << 9;
832                 bch_bio_map(s->op.cache_bio, NULL);
833
834                 src = bio_iovec(s->op.cache_bio);
835                 dst = bio_iovec(s->cache_miss);
836                 src_offset = src->bv_offset;
837                 dst_offset = dst->bv_offset;
838                 dst_ptr = kmap(dst->bv_page);
839
840                 while (1) {
841                         if (dst_offset == dst->bv_offset + dst->bv_len) {
842                                 kunmap(dst->bv_page);
843                                 dst++;
844                                 if (dst == bio_iovec_idx(s->cache_miss,
845                                                 s->cache_miss->bi_vcnt))
846                                         break;
847
848                                 dst_offset = dst->bv_offset;
849                                 dst_ptr = kmap(dst->bv_page);
850                         }
851
852                         if (src_offset == src->bv_offset + src->bv_len) {
853                                 src++;
854                                 if (src == bio_iovec_idx(s->op.cache_bio,
855                                                  s->op.cache_bio->bi_vcnt))
856                                         BUG();
857
858                                 src_offset = src->bv_offset;
859                         }
860
861                         bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
862                                     src->bv_offset + src->bv_len - src_offset);
863
864                         memcpy(dst_ptr + dst_offset,
865                                page_address(src->bv_page) + src_offset,
866                                bytes);
867
868                         src_offset      += bytes;
869                         dst_offset      += bytes;
870                 }
871
872                 bio_put(s->cache_miss);
873                 s->cache_miss = NULL;
874         }
875
876         if (verify(dc, &s->bio.bio) && s->recoverable)
877                 bch_data_verify(s);
878
879         bio_complete(s);
880
881         if (s->op.cache_bio &&
882             !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
883                 s->op.type = BTREE_REPLACE;
884                 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
885         }
886
887         continue_at(cl, cached_dev_read_complete, NULL);
888 }
889
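/*
 * Read completion run from the endio path (the "bottom half"): account the
 * hit/miss, then punt to a workqueue if there's real work left to do (error
 * recovery, copying out of the bounce buffer, verify), otherwise complete
 * directly.
 */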
890 static void request_read_done_bh(struct closure *cl)
891 {
892         struct search *s = container_of(cl, struct search, cl);
893         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
894
895         bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
896         trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
897
898         if (s->error)
899                 continue_at_nobarrier(cl, request_read_error, bcache_wq);
900         else if (s->op.cache_bio || verify(dc, &s->bio.bio))
901                 continue_at_nobarrier(cl, request_read_done, bcache_wq);
902         else
903                 continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
904 }
905
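/*
 * Handle the part of a read the btree had no data for: split off the missing
 * range, extend it with readahead when that looks worthwhile, insert a check
 * key (so the insert is dropped if a write races with it), and read the data
 * into a bounce bio that will later be copied out and inserted into the cache.
 */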
906 static int cached_dev_cache_miss(struct btree *b, struct search *s,
907                                  struct bio *bio, unsigned sectors)
908 {
909         int ret = 0;
910         unsigned reada;
911         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
912         struct bio *miss;
913
914         miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
915         if (!miss)
916                 return -EAGAIN;
917
918         if (miss == bio)
919                 s->op.lookup_done = true;
920
921         miss->bi_end_io         = request_endio;
922         miss->bi_private        = &s->cl;
923
924         if (s->cache_miss || s->op.skip)
925                 goto out_submit;
926
927         if (miss != bio ||
928             (bio->bi_rw & REQ_RAHEAD) ||
929             (bio->bi_rw & REQ_META) ||
930             s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
931                 reada = 0;
932         else {
933                 reada = min(dc->readahead >> 9,
934                             sectors - bio_sectors(miss));
935
936                 if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
937                         reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
938         }
939
940         s->cache_bio_sectors = bio_sectors(miss) + reada;
941         s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
942                         DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
943                         dc->disk.bio_split);
944
945         if (!s->op.cache_bio)
946                 goto out_submit;
947
948         s->op.cache_bio->bi_sector      = miss->bi_sector;
949         s->op.cache_bio->bi_bdev        = miss->bi_bdev;
950         s->op.cache_bio->bi_size        = s->cache_bio_sectors << 9;
951
952         s->op.cache_bio->bi_end_io      = request_endio;
953         s->op.cache_bio->bi_private     = &s->cl;
954
955         /* btree_search_recurse()'s btree iterator is no good anymore */
956         ret = -EINTR;
957         if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
958                 goto out_put;
959
960         bch_bio_map(s->op.cache_bio, NULL);
961         if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
962                 goto out_put;
963
964         s->cache_miss = miss;
965         bio_get(s->op.cache_bio);
966
967         closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
968
969         return ret;
970 out_put:
971         bio_put(s->op.cache_bio);
972         s->op.cache_bio = NULL;
973 out_submit:
974         closure_bio_submit(miss, &s->cl, s->d);
975         return ret;
976 }
977
978 static void request_read(struct cached_dev *dc, struct search *s)
979 {
980         struct closure *cl = &s->cl;
981
982         check_should_skip(dc, s);
983         closure_call(&s->op.cl, btree_read_async, NULL, cl);
984
985         continue_at(cl, request_read_done_bh, NULL);
986 }
987
988 /* Process writes */
989
990 static void cached_dev_write_complete(struct closure *cl)
991 {
992         struct search *s = container_of(cl, struct search, cl);
993         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
994
995         up_read_non_owner(&dc->writeback_lock);
996         cached_dev_bio_complete(cl);
997 }
998
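/*
 * Write path: decide between writeback (data goes only to the cache and is
 * marked dirty), writethrough (data goes to both the cache and the backing
 * device), and bypass (op.skip - backing device only, with the cached range
 * invalidated). A write with REQ_FLUSH set also needs its flush forwarded to
 * the backing device even in writeback mode; that's what the emptied bio
 * submitted under s->op.flush_journal below is for.
 */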
999 static void request_write(struct cached_dev *dc, struct search *s)
1000 {
1001         struct closure *cl = &s->cl;
1002         struct bio *bio = &s->bio.bio;
1003         struct bkey start, end;
1004         start = KEY(dc->disk.id, bio->bi_sector, 0);
1005         end = KEY(dc->disk.id, bio_end(bio), 0);
1006
1007         bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
1008
1009         check_should_skip(dc, s);
1010         down_read_non_owner(&dc->writeback_lock);
1011
1012         if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
1013                 s->op.skip      = false;
1014                 s->writeback    = true;
1015         }
1016
1017         if (bio->bi_rw & REQ_DISCARD)
1018                 goto skip;
1019
1020         if (should_writeback(dc, s->orig_bio,
1021                              cache_mode(dc, bio),
1022                              s->op.skip)) {
1023                 s->op.skip = false;
1024                 s->writeback = true;
1025         }
1026
1027         if (s->op.skip)
1028                 goto skip;
1029
1030         trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
1031
1032         if (!s->writeback) {
1033                 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
1034                                                    dc->disk.bio_split);
1035
1036                 closure_bio_submit(bio, cl, s->d);
1037         } else {
1038                 bch_writeback_add(dc);
1039
1040                 if (s->op.flush_journal) {
1041                         /* Also need to send a flush to the backing device */
1042                         s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
1043                                                            dc->disk.bio_split);
1044
1045                         bio->bi_size = 0;
1046                         bio->bi_vcnt = 0;
1047                         closure_bio_submit(bio, cl, s->d);
1048                 } else {
1049                         s->op.cache_bio = bio;
1050                 }
1051         }
1052 out:
1053         closure_call(&s->op.cl, bch_insert_data, NULL, cl);
1054         continue_at(cl, cached_dev_write_complete, NULL);
1055 skip:
1056         s->op.skip = true;
1057         s->op.cache_bio = s->orig_bio;
1058         bio_get(s->op.cache_bio);
1059
1060         if ((bio->bi_rw & REQ_DISCARD) &&
1061             !blk_queue_discard(bdev_get_queue(dc->bdev)))
1062                 goto out;
1063
1064         closure_bio_submit(bio, cl, s->d);
1065         goto out;
1066 }
1067
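/*
 * Requests with no data: discards are handed to the write path, and an empty
 * flush (REQ_FLUSH) triggers a journal flush in addition to being passed
 * through to the backing device.
 */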
1068 static void request_nodata(struct cached_dev *dc, struct search *s)
1069 {
1070         struct closure *cl = &s->cl;
1071         struct bio *bio = &s->bio.bio;
1072
1073         if (bio->bi_rw & REQ_DISCARD) {
1074                 request_write(dc, s);
1075                 return;
1076         }
1077
1078         if (s->op.flush_journal)
1079                 bch_journal_meta(s->op.c, cl);
1080
1081         closure_bio_submit(bio, cl, s->d);
1082
1083         continue_at(cl, cached_dev_bio_complete, NULL);
1084 }
1085
1086 /* Cached devices - read & write stuff */
1087
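/*
 * Returns the bypass cutoff, in sectors, used by check_should_skip(): the more
 * congested the cache device has recently been (tracked in c->congested and
 * decaying with time since the last sample), the smaller the value returned
 * and the more IO gets sent straight to the backing device. Zero means no
 * congestion-based bypassing; a little randomness is mixed in so the cutoff
 * dithers.
 */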
1088 unsigned bch_get_congested(struct cache_set *c)
1089 {
1090         int i;
1091         long rand;
1092
1093         if (!c->congested_read_threshold_us &&
1094             !c->congested_write_threshold_us)
1095                 return 0;
1096
1097         i = (local_clock_us() - c->congested_last_us) / 1024;
1098         if (i < 0)
1099                 return 0;
1100
1101         i += atomic_read(&c->congested);
1102         if (i >= 0)
1103                 return 0;
1104
1105         i += CONGESTED_MAX;
1106
1107         if (i > 0)
1108                 i = fract_exp_two(i, 6);
1109
1110         rand = get_random_int();
1111         i -= bitmap_weight(&rand, BITS_PER_LONG);
1112
1113         return i > 0 ? i : 1;
1114 }
1115
1116 static void add_sequential(struct task_struct *t)
1117 {
1118         ewma_add(t->sequential_io_avg,
1119                  t->sequential_io, 8, 0);
1120
1121         t->sequential_io = 0;
1122 }
1123
1124 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
1125 {
1126         return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
1127 }
1128
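/*
 * Decide whether this request should bypass the cache entirely (s->op.skip):
 * skip if the device is detaching, the cache is past CUTOFF_CACHE_ADD, it's a
 * discard, the cache mode rules it out, the IO is unaligned, the task's IO
 * looks sequential past sequential_cutoff, or the cache device is congested.
 */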
1129 static void check_should_skip(struct cached_dev *dc, struct search *s)
1130 {
1131         struct cache_set *c = s->op.c;
1132         struct bio *bio = &s->bio.bio;
1133         unsigned mode = cache_mode(dc, bio);
1134         unsigned sectors, congested = bch_get_congested(c);
1135
1136         if (atomic_read(&dc->disk.detaching) ||
1137             c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
1138             (bio->bi_rw & REQ_DISCARD))
1139                 goto skip;
1140
1141         if (mode == CACHE_MODE_NONE ||
1142             (mode == CACHE_MODE_WRITEAROUND &&
1143              (bio->bi_rw & REQ_WRITE)))
1144                 goto skip;
1145
1146         if (bio->bi_sector   & (c->sb.block_size - 1) ||
1147             bio_sectors(bio) & (c->sb.block_size - 1)) {
1148                 pr_debug("skipping unaligned io");
1149                 goto skip;
1150         }
1151
1152         if (!congested && !dc->sequential_cutoff)
1153                 goto rescale;
1154
1155         if (!congested &&
1156             mode == CACHE_MODE_WRITEBACK &&
1157             (bio->bi_rw & REQ_WRITE) &&
1158             (bio->bi_rw & REQ_SYNC))
1159                 goto rescale;
1160
1161         if (dc->sequential_merge) {
1162                 struct io *i;
1163
1164                 spin_lock(&dc->io_lock);
1165
1166                 hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
1167                         if (i->last == bio->bi_sector &&
1168                             time_before(jiffies, i->jiffies))
1169                                 goto found;
1170
1171                 i = list_first_entry(&dc->io_lru, struct io, lru);
1172
1173                 add_sequential(s->task);
1174                 i->sequential = 0;
1175 found:
1176                 if (i->sequential + bio->bi_size > i->sequential)
1177                         i->sequential   += bio->bi_size;
1178
1179                 i->last                  = bio_end(bio);
1180                 i->jiffies               = jiffies + msecs_to_jiffies(5000);
1181                 s->task->sequential_io   = i->sequential;
1182
1183                 hlist_del(&i->hash);
1184                 hlist_add_head(&i->hash, iohash(dc, i->last));
1185                 list_move_tail(&i->lru, &dc->io_lru);
1186
1187                 spin_unlock(&dc->io_lock);
1188         } else {
1189                 s->task->sequential_io = bio->bi_size;
1190
1191                 add_sequential(s->task);
1192         }
1193
1194         sectors = max(s->task->sequential_io,
1195                       s->task->sequential_io_avg) >> 9;
1196
1197         if (dc->sequential_cutoff &&
1198             sectors >= dc->sequential_cutoff >> 9) {
1199                 trace_bcache_bypass_sequential(s->orig_bio);
1200                 goto skip;
1201         }
1202
1203         if (congested && sectors >= congested) {
1204                 trace_bcache_bypass_congested(s->orig_bio);
1205                 goto skip;
1206         }
1207
1208 rescale:
1209         bch_rescale_priorities(c, bio_sectors(bio));
1210         return;
1211 skip:
1212         bch_mark_sectors_bypassed(s, bio_sectors(bio));
1213         s->op.skip = true;
1214 }
1215
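/*
 * make_request entry point for cached devices: account the IO, remap the bio
 * onto the backing device, then hand it to the read, write or no-data path;
 * if the cached_dev is going away, the bio is passed straight through.
 */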
1216 static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
1217 {
1218         struct search *s;
1219         struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1220         struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1221         int cpu, rw = bio_data_dir(bio);
1222
1223         cpu = part_stat_lock();
1224         part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1225         part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1226         part_stat_unlock();
1227
1228         bio->bi_bdev = dc->bdev;
1229         bio->bi_sector += dc->sb.data_offset;
1230
1231         if (cached_dev_get(dc)) {
1232                 s = search_alloc(bio, d);
1233                 trace_bcache_request_start(s, bio);
1234
1235                 if (!bio_has_data(bio))
1236                         request_nodata(dc, s);
1237                 else if (rw)
1238                         request_write(dc, s);
1239                 else
1240                         request_read(dc, s);
1241         } else {
1242                 if ((bio->bi_rw & REQ_DISCARD) &&
1243                     !blk_queue_discard(bdev_get_queue(dc->bdev)))
1244                         bio_endio(bio, 0);
1245                 else
1246                         bch_generic_make_request(bio, &d->bio_split_hook);
1247         }
1248 }
1249
1250 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1251                             unsigned int cmd, unsigned long arg)
1252 {
1253         struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1254         return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1255 }
1256
1257 static int cached_dev_congested(void *data, int bits)
1258 {
1259         struct bcache_device *d = data;
1260         struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1261         struct request_queue *q = bdev_get_queue(dc->bdev);
1262         int ret = 0;
1263
1264         if (bdi_congested(&q->backing_dev_info, bits))
1265                 return 1;
1266
1267         if (cached_dev_get(dc)) {
1268                 unsigned i;
1269                 struct cache *ca;
1270
1271                 for_each_cache(ca, d->c, i) {
1272                         q = bdev_get_queue(ca->bdev);
1273                         ret |= bdi_congested(&q->backing_dev_info, bits);
1274                 }
1275
1276                 cached_dev_put(dc);
1277         }
1278
1279         return ret;
1280 }
1281
1282 void bch_cached_dev_request_init(struct cached_dev *dc)
1283 {
1284         struct gendisk *g = dc->disk.disk;
1285
1286         g->queue->make_request_fn               = cached_dev_make_request;
1287         g->queue->backing_dev_info.congested_fn = cached_dev_congested;
1288         dc->disk.cache_miss                     = cached_dev_cache_miss;
1289         dc->disk.ioctl                          = cached_dev_ioctl;
1290 }
1291
1292 /* Flash backed devices */
1293
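/*
 * Flash-only volumes have no backing device, so a cache miss just means the
 * data was never written: zero fill the relevant part of the bio.
 */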
1294 static int flash_dev_cache_miss(struct btree *b, struct search *s,
1295                                 struct bio *bio, unsigned sectors)
1296 {
1297         /* Zero fill bio */
1298
1299         while (bio->bi_idx != bio->bi_vcnt) {
1300                 struct bio_vec *bv = bio_iovec(bio);
1301                 unsigned j = min(bv->bv_len >> 9, sectors);
1302
1303                 void *p = kmap(bv->bv_page);
1304                 memset(p + bv->bv_offset, 0, j << 9);
1305                 kunmap(bv->bv_page);
1306
1307                 bv->bv_len      -= j << 9;
1308                 bv->bv_offset   += j << 9;
1309
1310                 if (bv->bv_len)
1311                         return 0;
1312
1313                 bio->bi_sector  += j;
1314                 bio->bi_size    -= j << 9;
1315
1316                 bio->bi_idx++;
1317                 sectors         -= j;
1318         }
1319
1320         s->op.lookup_done = true;
1321
1322         return 0;
1323 }
1324
1325 static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1326 {
1327         struct search *s;
1328         struct closure *cl;
1329         struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1330         int cpu, rw = bio_data_dir(bio);
1331
1332         cpu = part_stat_lock();
1333         part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1334         part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1335         part_stat_unlock();
1336
1337         s = search_alloc(bio, d);
1338         cl = &s->cl;
1339         bio = &s->bio.bio;
1340
1341         trace_bcache_request_start(s, bio);
1342
1343         if (bio_has_data(bio) && !rw) {
1344                 closure_call(&s->op.cl, btree_read_async, NULL, cl);
1345         } else if (bio_has_data(bio) || s->op.skip) {
1346                 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
1347                                              &KEY(d->id, bio->bi_sector, 0),
1348                                              &KEY(d->id, bio_end(bio), 0));
1349
1350                 s->writeback    = true;
1351                 s->op.cache_bio = bio;
1352
1353                 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
1354         } else {
1355                 /* No data - probably a cache flush */
1356                 if (s->op.flush_journal)
1357                         bch_journal_meta(s->op.c, cl);
1358         }
1359
1360         continue_at(cl, search_free, NULL);
1361 }
1362
1363 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1364                            unsigned int cmd, unsigned long arg)
1365 {
1366         return -ENOTTY;
1367 }
1368
1369 static int flash_dev_congested(void *data, int bits)
1370 {
1371         struct bcache_device *d = data;
1372         struct request_queue *q;
1373         struct cache *ca;
1374         unsigned i;
1375         int ret = 0;
1376
1377         for_each_cache(ca, d->c, i) {
1378                 q = bdev_get_queue(ca->bdev);
1379                 ret |= bdi_congested(&q->backing_dev_info, bits);
1380         }
1381
1382         return ret;
1383 }
1384
1385 void bch_flash_dev_request_init(struct bcache_device *d)
1386 {
1387         struct gendisk *g = d->disk;
1388
1389         g->queue->make_request_fn               = flash_dev_make_request;
1390         g->queue->backing_dev_info.congested_fn = flash_dev_congested;
1391         d->cache_miss                           = flash_dev_cache_miss;
1392         d->ioctl                                = flash_dev_ioctl;
1393 }
1394
1395 void bch_request_exit(void)
1396 {
1397 #ifdef CONFIG_CGROUP_BCACHE
1398         cgroup_unload_subsys(&bcache_subsys);
1399 #endif
1400         if (bch_search_cache)
1401                 kmem_cache_destroy(bch_search_cache);
1402 }
1403
1404 int __init bch_request_init(void)
1405 {
1406         bch_search_cache = KMEM_CACHE(search, 0);
1407         if (!bch_search_cache)
1408                 return -ENOMEM;
1409
1410 #ifdef CONFIG_CGROUP_BCACHE
1411         cgroup_load_subsys(&bcache_subsys);
1412         init_bch_cgroup(&bcache_default_cgroup);
1413
1414         cgroup_add_cftypes(&bcache_subsys, bch_files);
1415 #endif
1416         return 0;
1417 }