treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 409
[linux-block.git] drivers/lightnvm/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
			dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
				dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
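
/*
 * A worked example of the split above, under an assumed geometry of
 * dev->geo.num_lun = 8 luns per channel (the numbers are illustrative,
 * not taken from a real device). Creating a target over lun_begin = 0,
 * lun_end = 11 gives:
 *
 *	num_lun = 12, num_ch_mod = 12 % 8 = 4 -> num_ch = 12/8 + 1 = 2
 *	bch = 0 / 8 = 0, blun = 0 % 8 = 0
 *
 * The first loop iteration maps 8 luns into virtual channel 0, the
 * second maps the remaining 4 into virtual channel 1. Since 8 != 4,
 * lun_balanced drops to 0 and tgt_dev->geo.num_lun is set to -1.
 */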

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls back to the target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("nvm: invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("nvm: device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("nvm: target name already exists (%s)\n",
			create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t)
		return 1;

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
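
/*
 * A minimal sketch of how a target module uses the registration API
 * above. The stub_* callbacks and the "stub" name are hypothetical
 * placeholders; only struct nvm_tgt_type fields that this file itself
 * dereferences (name, version, init, exit, capacity, make_rq, owner)
 * are assumed:
 *
 *	static struct nvm_tgt_type tt_stub = {
 *		.name		= "stub",
 *		.version	= {1, 0, 0},
 *		.init		= stub_init,	// returns targetdata or ERR_PTR()
 *		.exit		= stub_exit,	// void (*)(void *, bool graceful)
 *		.capacity	= stub_capacity,
 *		.make_rq	= stub_make_rq,	// bio-based make_request_fn
 *		.owner		= THIS_MODULE,
 *	};
 *
 * module_init() then calls nvm_register_tgt_type(&tt_stub), and
 * module_exit() calls nvm_unregister_tgt_type(&tt_stub).
 */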

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
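
/*
 * Typical pairing of the two DMA helpers above, as this file itself
 * does in nvm_set_rqd_ppalist()/nvm_free_rqd_ppalist() further down:
 * the allocation backs rqd->ppa_list and must be freed with the dma
 * handle that was returned at allocation time.
 *
 *	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
 *					  &rqd->dma_ppa_list);
 *	if (!rqd->ppa_list)
 *		return -ENOMEM;
 *	...
 *	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
 */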

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}
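
/*
 * Worked example of the flag computation above: on a 1.2 device in
 * dual-plane mode, a sequential read (rqd->is_seq set, opcode
 * NVM_OP_PREAD) resolves to
 *
 *	flags = (geo->pln_mode >> 1) | NVM_IO_SCRAMBLE_ENABLE |
 *		NVM_IO_SUSPEND;
 *
 * while on a 2.0 device (NVM_OCSSD_SPEC_20) the function always
 * returns 0, since the 2.0 spec carries no such per-command hints.
 */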

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);
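
/*
 * A sketch of how a target issues an asynchronous read through
 * nvm_submit_io(). Addresses are filled in the target's own (generic)
 * format; the helper converts them to device format and converts back
 * on error. The bio, ppa and stub_end_io() below are illustrative
 * placeholders, not code from this file:
 *
 *	rqd->bio = bio;
 *	rqd->opcode = NVM_OP_PREAD;
 *	rqd->nr_ppas = 1;
 *	rqd->ppa_addr = ppa;
 *	rqd->end_io = stub_end_io;	// inspects rqd->error on completion
 *
 *	ret = nvm_submit_io(tgt_dev, rqd);
 */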

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return dev->ops->submit_io_sync(dev, rqd);
}

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}

/*
 * Scans the first and last page of a 1.2 chunk to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * The chunk is open; scan it sequentially to update the write
	 * pointer. We make the assumption that targets write data across
	 * all planes before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}
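
/*
 * Worked example of the fold above, assuming dual-plane mode
 * (geo->pln_mode == 2): the bad block table holds two entries per
 * chunk, so chunk 3 is described by blks[6] and blks[7]. If either
 * entry has NVM_BLK_T_BAD or NVM_BLK_T_GRWN_BAD set, the chunk is
 * reported as NVM_CHK_ST_OFFLINE; otherwise blks[6] decides, and a
 * free chunk is additionally scanned by nvm_bb_chunk_scan() to
 * recover its write pointer.
 */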

static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
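
/*
 * A sketch of how a target retrieves the metadata of every chunk in a
 * single lun through the export above; variable names are illustrative.
 * nchks counts chunks upwards from ppa, and 1.2 devices are handled
 * transparently via the bad block table emulation:
 *
 *	struct nvm_geo *geo = &tgt_dev->geo;
 *	struct nvm_chk_meta *meta;
 *	struct ppa_addr ppa;
 *
 *	meta = kmalloc_array(geo->num_chk, sizeof(*meta), GFP_KERNEL);
 *	if (!meta)
 *		return -ENOMEM;
 *
 *	ppa.ppa = 0;
 *	ppa.a.ch = ch;
 *	ppa.a.lun = lun;
 *
 *	ret = nvm_get_chunk_meta(tgt_dev, ppa, geo->num_chk, meta);
 */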

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
			geo->major_ver_id, geo->minor_ver_id,
			geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("nvm: could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);
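
/*
 * How a device driver hands a device to this core, sketched with
 * hypothetical names (ctrl_queue, stub_nvm_dev_ops, disk_name); the
 * nvm_dev_ops callbacks are the ones this file invokes: identity,
 * submit_io/submit_io_sync, get/set_bb_tbl, get_chk_meta and the dma
 * pool hooks.
 *
 *	struct nvm_dev *dev = nvm_alloc_dev(node);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	dev->q = ctrl_queue;
 *	dev->ops = &stub_nvm_dev_ops;
 *	strlcpy(dev->name, disk_name, DISK_NAME_LEN);
 *
 *	ret = nvm_register(dev);
 *	if (ret)
 *		return ret;
 */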

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("nvm: reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);
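
/*
 * Userspace reaches the ioctls above through /dev/lightnvm/control
 * (see _nvm_misc). A minimal userspace sketch of creating a target
 * with the simple config; error handling is trimmed, and the device,
 * target name and "pblk" target type are illustrative:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lightnvm.h>
 *
 *	struct nvm_ioctl_create c = { };
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "mytarget");
 *	strcpy(c.tgttype, "pblk");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = -1;	// -1/-1: span all luns, see
 *	c.conf.s.lun_end = -1;		// __nvm_config_simple()
 *
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 */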