lightnvm: eliminate nvm_block abstraction on mm
drivers/lightnvm/rrpc.h
/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Run GC only when fewer than 1/GC_LIMIT_INVERSE of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)

struct rrpc_inflight {
        struct list_head reqs;
        spinlock_t lock;
};

struct rrpc_inflight_rq {
        struct list_head list;
        sector_t l_start;
        sector_t l_end;
};

struct rrpc_rq {
        struct rrpc_inflight_rq inflight_rq;
        unsigned long flags;
};

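/*
 * Per-block state tracked by rrpc: the block's id within its LUN, a back
 * pointer to the owning rrpc_lun, its position on the LUN block lists, and
 * bookkeeping for the next writable page and the pages invalidated by
 * overwrites.
 */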
struct rrpc_block {
        int id;                         /* id inside of LUN */
        struct rrpc_lun *rlun;

        struct list_head prio;          /* LUN CG list */
        struct list_head list;          /* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
        /* Bitmap for invalid page entries */
        unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
        /* points to the next writable page within a block */
        unsigned int next_page;
        /* number of pages that are invalid, wrt host page size */
        unsigned int nr_invalid_pages;

        int state;

        spinlock_t lock;
        atomic_t data_cmnt_size; /* data pages committed to stable storage */
};

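/*
 * Per-LUN state: the block currently written to by the host (cur), the block
 * currently written to by the garbage collector (gc_cur), the free/used/bad
 * block lists, and the work item used to schedule GC for this LUN.
 */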
struct rrpc_lun {
        struct rrpc *rrpc;
        struct nvm_lun *parent;

        struct rrpc_block *cur, *gc_cur;
        struct rrpc_block *blocks;      /* Reference to block allocation */

        struct list_head prio_list;     /* Blocks that may be GC'ed */
        struct list_head wblk_list;     /* Queued blocks to be written to */

        /* lun block lists */
        struct list_head used_list;     /* In-use blocks */
        struct list_head free_list;     /* Unused blocks, i.e. released
                                         * and ready for use
                                         */
        struct list_head bb_list;       /* Bad blocks. Mutually exclusive with
                                         * free_list and used_list
                                         */
        unsigned int nr_free_blocks;    /* Number of unused blocks */

        struct work_struct ws_gc;

        int reserved_blocks;

        spinlock_t lock;
};

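/*
 * Per-target instance: ties the lightnvm device and the exposed gendisk to
 * the LUNs managed by this target, the logical-to-physical translation maps,
 * and the mempools, timer and workqueues used for request handling and GC.
 */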
struct rrpc {
        /* instance must be kept at the top to resolve rrpc in unprep */
        struct nvm_tgt_instance instance;

        struct nvm_tgt_dev *dev;
        struct gendisk *disk;

        sector_t soffset; /* logical sector offset */
        u64 poffset; /* physical page offset */

        int nr_luns;
        struct rrpc_lun *luns;

        /* calculated values */
        unsigned long long nr_sects;

        /* Write strategy variables. Move these into a separate structure for
         * each strategy.
         */
        atomic_t next_lun; /* Whenever a page is written, this is updated
                            * to point to the next write lun
                            */

        spinlock_t bio_lock;
        struct bio_list requeue_bios;
        struct work_struct ws_requeue;

        /* Simple translation map of logical addresses to physical addresses.
         * The logical addresses are known by the host system, while the
         * physical addresses are used when writing to the disk block device.
         */
        struct rrpc_addr *trans_map;
        /* also store a reverse map for garbage collection */
        struct rrpc_rev_addr *rev_trans_map;
        spinlock_t rev_lock;

        struct rrpc_inflight inflights;

        mempool_t *addr_pool;
        mempool_t *page_pool;
        mempool_t *gcb_pool;
        mempool_t *rq_pool;

        struct timer_list gc_timer;
        struct workqueue_struct *krqd_wq;
        struct workqueue_struct *kgc_wq;
};

149
150 struct rrpc_block_gc {
151         struct rrpc *rrpc;
152         struct rrpc_block *rblk;
153         struct work_struct ws_gc;
154 };
155
156 /* Logical to physical mapping */
157 struct rrpc_addr {
158         u64 addr;
159         struct rrpc_block *rblk;
160 };
161
162 /* Physical to logical mapping */
163 struct rrpc_rev_addr {
164         u64 addr;
165 };
166
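/*
 * Return the device sector address of the first sector in the block: the
 * offset of the owning LUN plus the offset of the block within that LUN.
 */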
static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct nvm_geo *geo = &dev->geo;
        struct rrpc_lun *rlun = rblk->rlun;
        struct nvm_lun *lun = rlun->parent;

        return (lun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}

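/*
 * Helpers converting between the 512 byte sectors seen in bios and the 4K
 * logical pages (NR_PHY_IN_LOG sectors each) that rrpc maps internally.
 */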
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
        return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
        return laddr * NR_PHY_IN_LOG;
}

static inline int request_intersects(struct rrpc_inflight_rq *r,
                                sector_t laddr_start, sector_t laddr_end)
{
        return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}

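/*
 * Lock a logical address range against concurrent requests: returns 1 if an
 * overlapping request is already inflight (the caller should come back
 * later), otherwise registers the range on the inflight list and returns 0.
 */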
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
                             unsigned int pages, struct rrpc_inflight_rq *r)
{
        sector_t laddr_end = laddr + pages - 1;
        struct rrpc_inflight_rq *rtmp;

        WARN_ON(irqs_disabled());

        spin_lock_irq(&rrpc->inflights.lock);
        list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
                if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
                        /* existing, overlapping request, come back later */
                        spin_unlock_irq(&rrpc->inflights.lock);
                        return 1;
                }
        }

        r->l_start = laddr;
        r->l_end = laddr_end;

        list_add_tail(&r->list, &rrpc->inflights.reqs);
        spin_unlock_irq(&rrpc->inflights.lock);
        return 0;
}

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
                                 unsigned int pages,
                                 struct rrpc_inflight_rq *r)
{
        BUG_ON((laddr + pages) > rrpc->nr_sects);

        return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
        struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

        return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
                                                        struct nvm_rq *rqd)
{
        sector_t laddr = rrpc_get_laddr(bio);
        unsigned int pages = rrpc_get_pages(bio);
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

        return rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
                                                struct rrpc_inflight_rq *r)
{
        unsigned long flags;

        spin_lock_irqsave(&rrpc->inflights.lock, flags);
        list_del_init(&r->list);
        spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
        uint8_t pages = rqd->nr_ppas;

        BUG_ON((r->l_start + pages) > rrpc->nr_sects);

        rrpc_unlock_laddr(rrpc, r);
}

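/*
 * Sketch of how the range-locking helpers above are meant to be used by a
 * caller (illustration only, not compiled; the function names below are
 * hypothetical): take the logical range lock when a bio is mapped onto a
 * request, requeue the bio if an overlapping request is inflight, and drop
 * the lock again on completion.
 */
#if 0
static int example_rrpc_submit(struct rrpc *rrpc, struct bio *bio,
                               struct nvm_rq *rqd)
{
        if (rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;  /* overlapping request inflight */

        /* ... map logical addresses to physical addresses and submit rqd ... */

        return NVM_IO_OK;
}

static void example_rrpc_end_io(struct rrpc *rrpc, struct nvm_rq *rqd)
{
        /* release the logical range taken in example_rrpc_submit() */
        rrpc_unlock_rq(rrpc, rqd);
}
#endif
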
#endif /* RRPC_H_ */