bcache: Minor fixes from kbuild robot
drivers/md/bcache/request.c
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};

static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

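/*
 * Checksum the data in @bio and stash the result in @k, in the u64 slot
 * immediately after the key's last pointer.
 */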
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

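/*
 * Journal the keys accumulated so far (unless this is a replace operation)
 * and then insert them into the btree; if there's still data left to write,
 * loop back to bch_data_insert_start().
 */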
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to insert
	 * are bigger than an empty write: If we just return -ENOMEM here,
	 * bio_insert() and bio_invalidate() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

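/*
 * Allocate space in the cache and build the keys for op->bio: each pass
 * through the loop allocates some sectors, splits off that much of the bio,
 * and submits the write with bch_submit_bbio(). The keys themselves are
 * inserted later, from bch_data_insert_keys().
 */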
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->bio, op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

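/*
 * Return 0 if the cache isn't currently considered congested; otherwise
 * return a small nonzero threshold (with some random jitter) that
 * check_should_bypass() compares against the size of the current sequential
 * stream, in sectors.
 */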
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

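/*
 * Decide whether this bio should bypass the cache and go straight to the
 * backing device: we skip the cache while detaching, when it's nearly full,
 * for discards and unaligned I/O, and for large sequential streams or when
 * the cache device is congested.
 */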
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

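/*
 * Per-request state, allocated from a mempool by search_alloc() and freed by
 * search_free(); embeds the btree op and the data insert op used for this
 * request.
 */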
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

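/*
 * Walk the btree keys that overlap this request with bch_btree_map_keys(),
 * calling cache_lookup_fn() for each one; requeue ourselves if the lookup
 * would have had to block (-EAGAIN).
 */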
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

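/*
 * Called (via s->d->cache_miss) for the part of a read that wasn't found in
 * the cache: read it from the backing device, optionally with readahead, and
 * set up s->iop.bio so the missed data can be inserted into the cache once
 * the read completes.
 */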
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

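/*
 * Kick off the cache lookup; cached_dev_read_done_bh() then either completes
 * the request, retries it from the backing device on error, or finishes off
 * a partial cache miss insert.
 */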
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

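/*
 * Pick a strategy for this write: bypass sends it straight to the backing
 * device, writeback writes only to the cache (marking the data dirty), and
 * writethrough writes to the backing device while inserting a clean copy
 * into the cache. In all three cases bch_data_insert() is kicked off to
 * update the cache (for a bypass it just invalidates the overlapping region).
 */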
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

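/*
 * On a flash only volume a cache miss just means a hole in the keyspace, so
 * there's nothing to read from a backing device - zero fill the caller's bio
 * instead.
 */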
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, iter) {
		unsigned j = min(bv.bv_len >> 9, sectors);

		void *p = kmap(bv.bv_page);
		memset(p + bv.bv_offset, 0, j << 9);
		kunmap(bv.bv_page);

		sectors -= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}