/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

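/*
 * Mark the page backing @a invalid: set its bit in the owning block's
 * invalid_pages bitmap and clear the reverse-map entry so GC skips it.
 * The caller must hold rrpc->rev_lock.
 */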
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}

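/*
 * Invalidate all mapped pages in the logical range [slba, slba + len)
 * and detach them from their blocks, under rrpc->rev_lock.
 */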
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
				unsigned int len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

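/*
 * Take the inflight lock for @pages logical sectors starting at @laddr.
 * Returns a request on success, NULL if the range is already locked by
 * another request, or ERR_PTR(-ENOMEM) if no request could be allocated.
 */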
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

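/*
 * Handle a discard bio: wait until the logical range can be locked, then
 * invalidate every page in it. The mapping is dropped but no media erase
 * is issued here; reclaiming the space is left to GC.
 */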
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	while (1) {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		if (rqd)
			break;

		schedule();
	}

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;

	return (rblk->next_page == dev->geo.sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun = rblk->rlun;

	return rlun->id * dev->geo.sec_per_blk;
}

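/*
 * Expand a linear, target-local address into a generic device ppa_addr
 * by filling in the channel and lun of the owning rlun and the block id.
 */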
static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
					 struct rrpc_addr *gp)
{
	struct rrpc_block *rblk = gp->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	u64 addr = gp->addr;
	struct ppa_addr paddr;

	paddr.ppa = addr;
	paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
	paddr.g.ch = rlun->bppa.g.ch;
	paddr.g.lun = rlun->bppa.g.lun;
	paddr.g.blk = rblk->id;

	return paddr;
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
						struct rrpc_block **cur_rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	if (*cur_rblk) {
		spin_lock(&(*cur_rblk)->lock);
		WARN_ON(!block_is_full(rrpc, *cur_rblk));
		spin_unlock(&(*cur_rblk)->lock);
	}
	*cur_rblk = new_rblk;
}

static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
							struct rrpc_lun *rlun)
{
	struct rrpc_block *rblk = NULL;

	if (list_empty(&rlun->free_list))
		goto out;

	rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);

	list_move_tail(&rblk->list, &rlun->used_list);
	rblk->state = NVM_BLK_ST_TGT;
	rlun->nr_free_blocks--;

out:
	return rblk;
}

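/*
 * Allocate a free block from @rlun. Unless the request comes from GC
 * (NVM_IOTYPE_GC in @flags), refuse to dip into the blocks reserved for
 * garbage collection. The returned block is reset and ready for writes.
 */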
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&rlun->lock);
	if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
		pr_err("nvm: rrpc: cannot give block to non GC request\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}

	rblk = __rrpc_get_blk(rrpc, rlun);
	if (!rblk) {
		pr_err("nvm: rrpc: cannot get new block\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}
	spin_unlock(&rlun->lock);

	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

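/*
 * Return a block to its lun: good blocks go back on the free list,
 * grown-bad blocks are parked on the bb_list. Any other state is a bug.
 */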
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	if (rblk->state & NVM_BLK_ST_TGT) {
		list_move_tail(&rblk->list, &rlun->free_list);
		rlun->nr_free_blocks++;
		rblk->state = NVM_BLK_ST_FREE;
	} else if (rblk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&rblk->list, &rlun->bb_list);
		rblk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
					rlun->bppa.g.ch, rlun->bppa.g.lun,
					rblk->id, rblk->state);
		list_move_tail(&rblk->list, &rlun->bb_list);
	}
	spin_unlock(&rlun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

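/* Queue per-lun GC work for every lun in the target. */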
static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(struct timer_list *t)
{
	struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_status)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_status);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct request_queue *q = dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = dev->geo.sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_status) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_status)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

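/*
 * Reclaim one block: migrate its remaining valid pages, then erase it
 * and return it to the free list. On failure the block is put back on
 * the lun's prio list so a later GC pass can retry.
 */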
static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct ppa_addr ppa;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
			rlun->bppa.g.ch, rlun->bppa.g.lun,
			rblk->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;
	ppa.g.blk = rblk->id;

	if (nvm_erase_sync(rrpc->dev, &ppa, 1))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with the highest number of invalid pages will be at the
 * beginning of the list
 */
static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with the highest number of invalid pages;
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblk, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblk, prio_list, prio)
		max = rblk_max_invalid(max, rblk);

	return max;
}

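/*
 * Per-lun GC worker: while the lun is short on free blocks, pick the
 * fullest candidates off the prio list and queue each one for reclaim
 * on the kgc workqueue.
 */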
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > rlun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblk = block_prio_find_max(rlun);

		if (!rblk->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblk->prio);

		WARN_ON(!block_is_full(rrpc, rblk));

		pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
					rlun->bppa.g.ch, rlun->bppa.g.lun,
					rblk->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblk;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
			rlun->bppa.g.ch, rlun->bppa.g.lun,
			rblk->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR; instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent a GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->nr_free_blocks > max_free->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

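/*
 * Point logical address @laddr at the physical page (@rblk, @paddr),
 * invalidating any previous mapping, and record the reverse mapping so
 * GC can later find the owner of the physical page.
 */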
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

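/*
 * Hand out the next free page in @rblk, or ADDR_EMPTY if the block is
 * already full. Pages within a block are allocated strictly in order.
 */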
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Map a logical address to a physical page. The mapping implements a round
 * robin approach and allocates a page from the next available lun.
 *
 * Returns the generic ppa_addr of the mapped page. On failure, the returned
 * address is ADDR_EMPTY.
 */
static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct nvm_tgt_dev *tgt_dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk, **cur_rblk;
	struct rrpc_addr *p;
	struct ppa_addr ppa;
	u64 paddr;
	int gc_force = 0;

	ppa.ppa = ADDR_EMPTY;
	rlun = rrpc_get_lun_rr(rrpc, is_gc);

	if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
		return ppa;

	/*
	 * page allocation steps:
	 * 1. Try to allocate a new page from the current rblk
	 * 2a. If that succeeds, map it in and return
	 * 2b. If it fails, first try to allocate a new block from the media
	 *     manager, and then retry step 1. Retry until the normal block
	 *     pool is exhausted.
	 * 3. If exhausted, and the garbage collector is requesting the block,
	 *    go to the reserved block and retry step 1.
	 *    In the case that this fails as well, or it is not GC
	 *    requesting, report not able to retrieve a block and let the
	 *    caller handle further processing.
	 */

	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;
	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr != ADDR_EMPTY)
		goto done;

	if (!list_empty(&rlun->wblk_list)) {
new_blk:
		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
									prio);
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);
		goto retry;
	}
	spin_unlock(&rlun->lock);

	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
	if (rblk) {
		spin_lock(&rlun->lock);
		list_add_tail(&rblk->prio, &rlun->wblk_list);
		/*
		 * another thread might already have added a new block;
		 * therefore, make sure that one is used, instead of the
		 * one just added.
		 */
		goto new_blk;
	}

	if (unlikely(is_gc) && !gc_force) {
		/* retry from emergency gc block */
		cur_rblk = &rlun->gc_cur;
		rblk = rlun->gc_cur;
		gc_force = 1;
		spin_lock(&rlun->lock);
		goto retry;
	}

	pr_err("rrpc: failed to allocate new block\n");
	return ppa;
done:
	spin_unlock(&rlun->lock);
	p = rrpc_update_map(rrpc, laddr, rblk, paddr);
	if (!p)
		return ppa;

	/* return global address */
	return rrpc_ppa_to_gaddr(tgt_dev, p);
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc\n");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
{
	struct rrpc_lun *rlun = NULL;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
				rrpc->luns[i].bppa.g.lun == p.g.lun) {
			rlun = &rrpc->luns[i];
			break;
		}
	}

	return rlun;
}

static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;

	rlun = rrpc_ppa_to_lun(rrpc, ppa);
	rblk = &rlun->blocks[ppa.g.blk];
	rblk->state = NVM_BLK_ST_BAD;

	nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
}

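/*
 * Walk the per-ppa completion status of a failed write and mark every
 * distinct block that saw an error as grown bad, both in the target
 * state and in the device's bad block table.
 */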
static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct ppa_addr ppa, prev_ppa;
	int nr_ppas = rqd->nr_ppas;
	int bit;

	if (rqd->nr_ppas == 1)
		__rrpc_mark_bad_block(rrpc, rqd->ppa_addr);

	ppa_set_empty(&prev_ppa);
	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		ppa = rqd->ppa_list[bit];
		if (ppa_cmp_blk(ppa, prev_ppa))
			continue;

		__rrpc_mark_bad_block(rrpc, ppa);
	}
}

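/*
 * Write completion: bump the committed-page count of each block touched
 * by the request; once a block is fully written it becomes a GC
 * candidate and is queued for the prio list.
 */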
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == dev->geo.sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = rqd->private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE) {
		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
			rrpc_mark_bad_block(rrpc, rqd);

		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
	}

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}

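/*
 * Read path: translate each logical page of the bio through trans_map
 * into a device address. A hole in the mapping on a user read completes
 * the bio without touching media; on a GC read it is a bug.
 */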
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct ppa_addr p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (p.ppa == ADDR_EMPTY) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = p;
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct ppa_addr p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (p.ppa == ADDR_EMPTY) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = p;
	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

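/*
 * Prepare the nvm_rq for submission: allocate a DMA-able ppa list for
 * multi-page requests and dispatch to the single- or multi-page
 * read/write setup helpers.
 */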
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;

	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_op(bio) == REQ_OP_WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_op(bio) == REQ_OP_WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

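/*
 * Validate the bio size against the device geometry, set up the request
 * and hand it to the lightnvm core. On submission failure the request's
 * locks and DMA resources are unwound before returning NVM_IO_ERR.
 */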
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;
	int err;

	if (bio_size < dev->geo.sec_size)
		return NVM_IO_ERR;
	else if (bio_size > dev->geo.max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->private = rrpc;
	rqd->nr_ppas = nr_pages;
	rqd->end_io = rrpc_end_io;
	rrq->flags = flags;

	err = nvm_submit_io(dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	blk_queue_split(q, &bio);

	if (bio_op(bio) == REQ_OP_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

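/*
 * Callback for nvm_get_l2p_tbl(): seed the forward and reverse maps from
 * the device's L2P snapshot and mark the blocks found in it as in use.
 */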
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	u64 i;

	for (i = 0; i < nlb; i++) {
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			pr_err("nvm: Maybe loaded an old target L2P\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected, as it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		gaddr = rrpc_recov_addr(dev, pba);
		rlun = rrpc_ppa_to_lun(rrpc, gaddr);
		if (!rlun) {
			pr_err("rrpc: l2p corruption on lba %llu\n",
							slba + i);
			return -EINVAL;
		}

		rblk = &rlun->blocks[gaddr.g.blk];
		if (!rblk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&rblk->list, &rlun->used_list);
			rblk->state = NVM_BLK_ST_TGT;
			rlun->nr_free_blocks--;
		}

		addr[i].addr = pba;
		addr[i].rblk = rblk;
		raddr[mod].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	/* Bring up the mapping table from device */
	ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

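/*
 * Query the device's bad block table for @rlun and move every block
 * already marked bad from the free list to the lun's bb_list.
 */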
static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_block *rblk;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks;
	int i;
	int ret;

	if (!dev->parent->ops->get_bb_tbl)
		return 0;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret) {
		pr_err("rrpc: could not get BB table\n");
		goto out;
	}

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == NVM_BLK_T_FREE)
			continue;

		rblk = &rlun->blocks[i];
		list_move_tail(&rblk->list, &rlun->bb_list);
		rblk->state = NVM_BLK_ST_BAD;
		rlun->nr_free_blocks--;
	}

out:
	kfree(blks);
	return ret;
}

static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
{
	rlun->bppa.ppa = 0;
	rlun->bppa.g.ch = ppa.g.ch;
	rlun->bppa.g.lun = ppa.g.lun;
}

static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.\n");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		rlun->id = i;
		rrpc_set_lun_ppa(rlun, luns[i]);
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
							geo->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		INIT_LIST_HEAD(&rlun->free_list);
		INIT_LIST_HEAD(&rlun->used_list);
		INIT_LIST_HEAD(&rlun->bb_list);

		for (j = 0; j < geo->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];

			rblk->id = j;
			rblk->rlun = rlun;
			rblk->state = NVM_BLK_T_FREE;
			INIT_LIST_HEAD(&rblk->prio);
			INIT_LIST_HEAD(&rblk->list);
			spin_lock_init(&rblk->lock);

			list_add_tail(&rblk->list, &rlun->free_list);
		}

		rlun->rrpc = rrpc;
		rlun->nr_free_blocks = geo->blks_per_lun;
		rlun->reserved_blocks = 2; /* for GC only */

		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->wblk_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		if (rrpc_bb_discovery(dev, rlun))
			goto err;

	}

	return 0;
err:
	return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t size = rrpc->nr_sects * dev->geo.sec_size;
	int ret;

	size >>= 9;

	ret = nvm_get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->geo.sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);

	nvm_put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

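/*
 * Report the target's usable capacity: subtract the per-lun reserved
 * blocks (cur, gc_cur and two emergency blocks) and expose only 90% of
 * what remains, keeping the rest as over-provisioning headroom for GC.
 */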
static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Look up the logical address in the reverse translation map and check
 * whether the page is still live by comparing the forward mapping with
 * the physical address. Pages that no longer match are marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc *rrpc;
	sector_t soffset;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = geo->nr_luns;
	rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
	if (ret < 0) {
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	}
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, dev->luns);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");