/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
							unsigned len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->pgs_per_blk);
}

static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->pgs_per_blk;
}

static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	BUG_ON(!rblk);

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = &rlun->blocks[blk->id];
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					nr_pgs_per_blk)) < nr_pgs_per_blk) {

		/* Lock laddr */
		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* The block with the highest number of invalid pages will be at the
 * beginning of the list.
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* Linearly find the block with the highest number of invalid pages.
 * Requires rlun->lock to be held.
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* During GC we don't care about the round-robin order; instead we
	 * want to maintain evenness between the LUNs.
	 */
	max_free = &rrpc->luns[0];
	/* Prevent the GC-ing LUN from devouring pages of a LUN with few
	 * free blocks. We don't take the lock as we only need an estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_pages);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin logical-to-physical address translation.
 *
 * Retrieve the mapping using the active append point, then update the append
 * point for the next write to the disk.
 *
 * Returns an rrpc_addr with the physical address and block. Remember to
 * release the mapping when the request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
			struct nvm_block *blk = rblk->parent;
			struct rrpc_lun *rlun = rblk->rlun;

			spin_lock(&lun->lock);
			lun->nr_open_blocks--;
			lun->nr_closed_blocks++;
			blk->state &= ~NVM_BLK_ST_OPEN;
			blk->state |= NVM_BLK_ST_CLOSED;
			list_move_tail(&rblk->list, &rlun->closed_list);
			spin_unlock(&lun->lock);

			rrpc_run_gc(rrpc, rblk);
		}
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_pages;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
	if (rqd->metadata)
		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_pages = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_pages > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		if (!rlun->blocks)
			break;
		vfree(rlun->blocks);
	}
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_pages)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		/* LNVM treats address spaces as silos: LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is special: the first page on a disk is
		 * protected, as it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		addr[i].addr = pba;
		raddr[pba].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_pages);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_pages; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
							rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	kfree(rrpc->luns);
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j;

	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);

		rlun = &rrpc->luns[i];
		rlun->rrpc = rrpc;
		rlun->parent = lun;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		rrpc->total_blocks += dev->blks_per_lun;
		rrpc->nr_pages += dev->sec_per_lun;

		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks)
			goto err;

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}
	}

	return 0;
err:
	return -ENOMEM;
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
	provisioned = rrpc->nr_pages - reserved;

	if (reserved > rrpc->nr_pages) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Look up the logical address in the reverse translation map and check whether
 * it is still valid by comparing the logical-to-physical mapping with the
 * physical address. Pages whose mapping no longer points here are marked
 * invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 paddr, pladdr;

	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
		paddr = block_to_addr(rrpc, rblk) + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_target(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_target(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");