libnvdimm: write blk label set
[linux-2.6-block.git] drivers/nvdimm/namespace_devs.c
1/*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/slab.h>
16#include <linux/nd.h>
17#include "nd-core.h"
18#include "nd.h"
19
20static void namespace_io_release(struct device *dev)
21{
22 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
23
24 kfree(nsio);
25}
26
27static void namespace_pmem_release(struct device *dev)
28{
29 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
30
31 kfree(nspm->alt_name);
32 kfree(nspm->uuid);
33 kfree(nspm);
34}
35
36static void namespace_blk_release(struct device *dev)
37{
38 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
39 struct nd_region *nd_region = to_nd_region(dev->parent);
40
41 if (nsblk->id >= 0)
42 ida_simple_remove(&nd_region->ns_ida, nsblk->id);
43 kfree(nsblk->alt_name);
44 kfree(nsblk->uuid);
45 kfree(nsblk->res);
46 kfree(nsblk);
47}
48
49static struct device_type namespace_io_device_type = {
50 .name = "nd_namespace_io",
51 .release = namespace_io_release,
52};
53
54static struct device_type namespace_pmem_device_type = {
55 .name = "nd_namespace_pmem",
56 .release = namespace_pmem_release,
57};
58
59static struct device_type namespace_blk_device_type = {
60 .name = "nd_namespace_blk",
61 .release = namespace_blk_release,
62};
63
64static bool is_namespace_pmem(struct device *dev)
65{
66 return dev ? dev->type == &namespace_pmem_device_type : false;
67}
68
69static bool is_namespace_blk(struct device *dev)
70{
71 return dev ? dev->type == &namespace_blk_device_type : false;
72}
73
74static bool is_namespace_io(struct device *dev)
75{
76 return dev ? dev->type == &namespace_io_device_type : false;
77}
78
79static ssize_t nstype_show(struct device *dev,
80 struct device_attribute *attr, char *buf)
81{
82 struct nd_region *nd_region = to_nd_region(dev->parent);
83
84 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
85}
86static DEVICE_ATTR_RO(nstype);
87
88static ssize_t __alt_name_store(struct device *dev, const char *buf,
89 const size_t len)
90{
91 char *input, *pos, *alt_name, **ns_altname;
92 ssize_t rc;
93
94 if (is_namespace_pmem(dev)) {
95 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
96
97 ns_altname = &nspm->alt_name;
98 } else if (is_namespace_blk(dev)) {
99 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
100
101 ns_altname = &nsblk->alt_name;
102 } else
103 return -ENXIO;
104
105 if (dev->driver)
106 return -EBUSY;
107
108 input = kmemdup(buf, len + 1, GFP_KERNEL);
109 if (!input)
110 return -ENOMEM;
111
112 input[len] = '\0';
113 pos = strim(input);
114 if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
115 rc = -EINVAL;
116 goto out;
117 }
118
119 alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
120 if (!alt_name) {
121 rc = -ENOMEM;
122 goto out;
123 }
124 kfree(*ns_altname);
125 *ns_altname = alt_name;
126 sprintf(*ns_altname, "%s", pos);
127 rc = len;
128
129out:
130 kfree(input);
131 return rc;
132}
133
134static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
135{
136 struct nd_region *nd_region = to_nd_region(nsblk->dev.parent);
137 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
138 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
139 struct nd_label_id label_id;
140 resource_size_t size = 0;
141 struct resource *res;
142
143 if (!nsblk->uuid)
144 return 0;
145 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
146 for_each_dpa_resource(ndd, res)
147 if (strcmp(res->name, label_id.id) == 0)
148 size += resource_size(res);
149 return size;
150}
151
152static int nd_namespace_label_update(struct nd_region *nd_region,
153 struct device *dev)
154{
155 dev_WARN_ONCE(dev, dev->driver,
156 "namespace must be idle during label update\n");
157 if (dev->driver)
158 return 0;
159
160 /*
161 * Only allow label writes that will result in a valid namespace
162 * or deletion of an existing namespace.
163 */
164 if (is_namespace_pmem(dev)) {
165 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
166 resource_size_t size = resource_size(&nspm->nsio.res);
167
168 if (size == 0 && nspm->uuid)
169 /* delete allocation */;
170 else if (!nspm->uuid)
171 return 0;
172
173 return nd_pmem_namespace_label_update(nd_region, nspm, size);
174 } else if (is_namespace_blk(dev)) {
175 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
176 resource_size_t size = nd_namespace_blk_size(nsblk);
177
178 if (size == 0 && nsblk->uuid)
179 /* delete allocation */;
180 else if (!nsblk->uuid || !nsblk->lbasize)
181 return 0;
182
183 return nd_blk_namespace_label_update(nd_region, nsblk, size);
184 } else
185 return -ENXIO;
186}
187
188static ssize_t alt_name_store(struct device *dev,
189 struct device_attribute *attr, const char *buf, size_t len)
190{
191 struct nd_region *nd_region = to_nd_region(dev->parent);
192 ssize_t rc;
193
194 device_lock(dev);
195 nvdimm_bus_lock(dev);
196 wait_nvdimm_bus_probe_idle(dev);
197 rc = __alt_name_store(dev, buf, len);
198 if (rc >= 0)
199 rc = nd_namespace_label_update(nd_region, dev);
200 dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
201 nvdimm_bus_unlock(dev);
202 device_unlock(dev);
203
204 return rc < 0 ? rc : len;
205}
206
207static ssize_t alt_name_show(struct device *dev,
208 struct device_attribute *attr, char *buf)
209{
210 char *ns_altname;
211
212 if (is_namespace_pmem(dev)) {
213 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
214
215 ns_altname = nspm->alt_name;
216 } else if (is_namespace_blk(dev)) {
217 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
218
219 ns_altname = nsblk->alt_name;
220 } else
221 return -ENXIO;
222
223 return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
224}
225static DEVICE_ATTR_RW(alt_name);
226
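/*
 * Release @n bytes of the @label_id allocation on one dimm: whole
 * resources are deleted from the high end of the set, and the last
 * remaining resource is shrunk by the remainder.
 */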
227static int scan_free(struct nd_region *nd_region,
228 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
229 resource_size_t n)
230{
231 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
232 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
233 int rc = 0;
234
235 while (n) {
236 struct resource *res, *last;
237 resource_size_t new_start;
238
239 last = NULL;
240 for_each_dpa_resource(ndd, res)
241 if (strcmp(res->name, label_id->id) == 0)
242 last = res;
243 res = last;
244 if (!res)
245 return 0;
246
247 if (n >= resource_size(res)) {
248 n -= resource_size(res);
249 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
250 nvdimm_free_dpa(ndd, res);
251 /* retry with last resource deleted */
252 continue;
253 }
254
255 /*
256 * Keep BLK allocations relegated to high DPA as much as
257 * possible
258 */
259 if (is_blk)
260 new_start = res->start + n;
261 else
262 new_start = res->start;
263
264 rc = adjust_resource(res, new_start, resource_size(res) - n);
265 if (rc == 0)
266 res->flags |= DPA_RESOURCE_ADJUSTED;
267 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
268 break;
269 }
270
271 return rc;
272}
273
274/**
275 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
276 * @nd_region: the set of dimms to reclaim @n bytes from
277 * @label_id: unique identifier for the namespace consuming this dpa range
278 * @n: number of bytes per-dimm to release
279 *
280 * Assumes resources are ordered. Starting from the end try to
281 * adjust_resource() the allocation to @n, but if @n is larger than the
282 * allocation delete it and find the 'new' last allocation in the label
283 * set.
284 */
285static int shrink_dpa_allocation(struct nd_region *nd_region,
286 struct nd_label_id *label_id, resource_size_t n)
287{
288 int i;
289
290 for (i = 0; i < nd_region->ndr_mappings; i++) {
291 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
292 int rc;
293
294 rc = scan_free(nd_region, nd_mapping, label_id, n);
295 if (rc)
296 return rc;
297 }
298
299 return 0;
300}
301
302static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
303 struct nd_region *nd_region, struct nd_mapping *nd_mapping,
304 resource_size_t n)
305{
306 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
307 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
308 resource_size_t first_dpa;
309 struct resource *res;
310 int rc = 0;
311
312 /* allocate blk from highest dpa first */
313 if (is_blk)
314 first_dpa = nd_mapping->start + nd_mapping->size - n;
315 else
316 first_dpa = nd_mapping->start;
317
318 /* first resource allocation for this label-id or dimm */
319 res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
320 if (!res)
321 rc = -EBUSY;
322
323 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
324 return rc ? n : 0;
325}
326
327static bool space_valid(bool is_pmem, bool is_reserve,
328 struct nd_label_id *label_id, struct resource *res)
329{
330 /*
331 * For BLK-space any space is valid, for PMEM-space, it must be
332 * contiguous with an existing allocation unless we are
333 * reserving pmem.
334 */
335 if (is_reserve || !is_pmem)
336 return true;
337 if (!res || strcmp(res->name, label_id->id) == 0)
338 return true;
339 return false;
340}
341
342enum alloc_loc {
343 ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
344};
345
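/*
 * Walk the free dpa ranges of one dimm and grow (or start) the @label_id
 * allocation by up to @n bytes; returns the number of bytes that could
 * not be allocated.
 */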
346static resource_size_t scan_allocate(struct nd_region *nd_region,
347 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
348 resource_size_t n)
349{
350 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
351 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
352 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
353 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
354 const resource_size_t to_allocate = n;
355 struct resource *res;
356 int first;
357
358 retry:
359 first = 0;
360 for_each_dpa_resource(ndd, res) {
361 resource_size_t allocate, available = 0, free_start, free_end;
362 struct resource *next = res->sibling, *new_res = NULL;
363 enum alloc_loc loc = ALLOC_ERR;
364 const char *action;
365 int rc = 0;
366
367 /* ignore resources outside this nd_mapping */
368 if (res->start > mapping_end)
369 continue;
370 if (res->end < nd_mapping->start)
371 continue;
372
373 /* space at the beginning of the mapping */
374 if (!first++ && res->start > nd_mapping->start) {
375 free_start = nd_mapping->start;
376 available = res->start - free_start;
377 if (space_valid(is_pmem, is_reserve, label_id, NULL))
378 loc = ALLOC_BEFORE;
379 }
380
381 /* space between allocations */
382 if (!loc && next) {
383 free_start = res->start + resource_size(res);
384 free_end = min(mapping_end, next->start - 1);
385 if (space_valid(is_pmem, is_reserve, label_id, res)
386 && free_start < free_end) {
387 available = free_end + 1 - free_start;
388 loc = ALLOC_MID;
389 }
390 }
391
392 /* space at the end of the mapping */
393 if (!loc && !next) {
394 free_start = res->start + resource_size(res);
395 free_end = mapping_end;
396 if (space_valid(is_pmem, is_reserve, label_id, res)
397 && free_start < free_end) {
398 available = free_end + 1 - free_start;
399 loc = ALLOC_AFTER;
400 }
401 }
402
403 if (!loc || !available)
404 continue;
405 allocate = min(available, n);
406 switch (loc) {
407 case ALLOC_BEFORE:
408 if (strcmp(res->name, label_id->id) == 0) {
409 /* adjust current resource up */
410 if (is_pmem && !is_reserve)
411 return n;
412 rc = adjust_resource(res, res->start - allocate,
413 resource_size(res) + allocate);
414 action = "cur grow up";
415 } else
416 action = "allocate";
417 break;
418 case ALLOC_MID:
419 if (strcmp(next->name, label_id->id) == 0) {
420 /* adjust next resource up */
421 if (is_pmem && !is_reserve)
422 return n;
423 rc = adjust_resource(next, next->start
424 - allocate, resource_size(next)
425 + allocate);
426 new_res = next;
427 action = "next grow up";
428 } else if (strcmp(res->name, label_id->id) == 0) {
429 action = "grow down";
430 } else
431 action = "allocate";
432 break;
433 case ALLOC_AFTER:
434 if (strcmp(res->name, label_id->id) == 0)
435 action = "grow down";
436 else
437 action = "allocate";
438 break;
439 default:
440 return n;
441 }
442
443 if (strcmp(action, "allocate") == 0) {
444 /* BLK allocate bottom up */
445 if (!is_pmem)
446 free_start += available - allocate;
447 else if (!is_reserve && free_start != nd_mapping->start)
448 return n;
449
450 new_res = nvdimm_allocate_dpa(ndd, label_id,
451 free_start, allocate);
452 if (!new_res)
453 rc = -EBUSY;
454 } else if (strcmp(action, "grow down") == 0) {
455 /* adjust current resource down */
456 rc = adjust_resource(res, res->start, resource_size(res)
457 + allocate);
458 if (rc == 0)
459 res->flags |= DPA_RESOURCE_ADJUSTED;
460 }
461
462 if (!new_res)
463 new_res = res;
464
465 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
466 action, loc, rc);
467
468 if (rc)
469 return n;
470
471 n -= allocate;
472 if (n) {
473 /*
474 * Retry scan with newly inserted resources.
475 * For example, if we did an ALLOC_BEFORE
476 * insertion there may also have been space
477 * available for an ALLOC_AFTER insertion, so we
478 * need to check this same resource again
479 */
480 goto retry;
481 } else
482 return 0;
483 }
484
485 /*
486 * If we allocated nothing in the BLK case it may be because we are in
487 * an initial "pmem-reserve pass". Only do an initial BLK allocation
488 * when none of the DPA space is reserved.
489 */
490 if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
491 return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
492 return n;
493}
494
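/*
 * Coalesce adjacent dpa resources that carry the same blk label into a
 * single resource; pmem labels are skipped since their allocations are
 * kept contiguous.
 */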
495static int merge_dpa(struct nd_region *nd_region,
496 struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
497{
498 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
499 struct resource *res;
500
501 if (strncmp("pmem", label_id->id, 4) == 0)
502 return 0;
503 retry:
504 for_each_dpa_resource(ndd, res) {
505 int rc;
506 struct resource *next = res->sibling;
507 resource_size_t end = res->start + resource_size(res);
508
509 if (!next || strcmp(res->name, label_id->id) != 0
510 || strcmp(next->name, label_id->id) != 0
511 || end != next->start)
512 continue;
513 end += resource_size(next);
514 nvdimm_free_dpa(ndd, next);
515 rc = adjust_resource(res, res->start, end - res->start);
516 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
517 if (rc)
518 return rc;
519 res->flags |= DPA_RESOURCE_ADJUSTED;
520 goto retry;
521 }
522
523 return 0;
524}
525
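/*
 * Tag all remaining free pmem capacity on the dimm passed in @data with
 * a temporary "pmem-reserve" label so a blk-only allocation pass cannot
 * consume it.
 */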
526static int __reserve_free_pmem(struct device *dev, void *data)
527{
528 struct nvdimm *nvdimm = data;
529 struct nd_region *nd_region;
530 struct nd_label_id label_id;
531 int i;
532
533 if (!is_nd_pmem(dev))
534 return 0;
535
536 nd_region = to_nd_region(dev);
537 if (nd_region->ndr_mappings == 0)
538 return 0;
539
540 memset(&label_id, 0, sizeof(label_id));
541 strcat(label_id.id, "pmem-reserve");
542 for (i = 0; i < nd_region->ndr_mappings; i++) {
543 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
544 resource_size_t n, rem = 0;
545
546 if (nd_mapping->nvdimm != nvdimm)
547 continue;
548
549 n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
550 if (n == 0)
551 return 0;
552 rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
553 dev_WARN_ONCE(&nd_region->dev, rem,
554 "pmem reserve underrun: %#llx of %#llx bytes\n",
555 (unsigned long long) n - rem,
556 (unsigned long long) n);
557 return rem ? -ENXIO : 0;
558 }
559
560 return 0;
561}
562
563static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
564 struct nd_mapping *nd_mapping)
565{
566 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
567 struct resource *res, *_res;
568
569 for_each_dpa_resource_safe(ndd, res, _res)
570 if (strcmp(res->name, "pmem-reserve") == 0)
571 nvdimm_free_dpa(ndd, res);
572}
573
574static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
575 struct nd_mapping *nd_mapping)
576{
577 struct nvdimm *nvdimm = nd_mapping->nvdimm;
578 int rc;
579
580 rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
581 __reserve_free_pmem);
582 if (rc)
583 release_free_pmem(nvdimm_bus, nd_mapping);
584 return rc;
585}
586
587/**
588 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
589 * @nd_region: the set of dimms to allocate @n more bytes from
590 * @label_id: unique identifier for the namespace consuming this dpa range
591 * @n: number of bytes per-dimm to add to the existing allocation
592 *
593 * Assumes resources are ordered. For BLK regions, first consume
594 * BLK-only available DPA free space, then consume PMEM-aliased DPA
595 * space starting at the highest DPA. For PMEM regions start
596 * allocations from the start of an interleave set and end at the first
597 * BLK allocation or the end of the interleave set, whichever comes
598 * first.
599 */
600static int grow_dpa_allocation(struct nd_region *nd_region,
601 struct nd_label_id *label_id, resource_size_t n)
602{
603 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
604 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
605 int i;
606
607 for (i = 0; i < nd_region->ndr_mappings; i++) {
608 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
609 resource_size_t rem = n;
610 int rc, j;
611
612 /*
613 * In the BLK case try once with all unallocated PMEM
614 * reserved, and once without
615 */
616 for (j = is_pmem; j < 2; j++) {
617 bool blk_only = j == 0;
618
619 if (blk_only) {
620 rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
621 if (rc)
622 return rc;
623 }
624 rem = scan_allocate(nd_region, nd_mapping,
625 label_id, rem);
626 if (blk_only)
627 release_free_pmem(nvdimm_bus, nd_mapping);
628
629 /* try again and allow encroachments into PMEM */
630 if (rem == 0)
631 break;
632 }
633
634 dev_WARN_ONCE(&nd_region->dev, rem,
635 "allocation underrun: %#llx of %#llx bytes\n",
636 (unsigned long long) n - rem,
637 (unsigned long long) n);
638 if (rem)
639 return -ENXIO;
640
641 rc = merge_dpa(nd_region, nd_mapping, label_id);
642 if (rc)
643 return rc;
644 }
645
646 return 0;
647}
648
649static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
650 struct nd_namespace_pmem *nspm, resource_size_t size)
651{
652 struct resource *res = &nspm->nsio.res;
653
654 res->start = nd_region->ndr_start;
655 res->end = nd_region->ndr_start + size - 1;
656}
657
658static ssize_t __size_store(struct device *dev, unsigned long long val)
659{
660 resource_size_t allocated = 0, available = 0;
661 struct nd_region *nd_region = to_nd_region(dev->parent);
662 struct nd_mapping *nd_mapping;
663 struct nvdimm_drvdata *ndd;
664 struct nd_label_id label_id;
665 u32 flags = 0, remainder;
666 u8 *uuid = NULL;
667 int rc, i;
668
669 if (dev->driver)
670 return -EBUSY;
671
672 if (is_namespace_pmem(dev)) {
673 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
674
675 uuid = nspm->uuid;
676 } else if (is_namespace_blk(dev)) {
677 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
678
679 uuid = nsblk->uuid;
680 flags = NSLABEL_FLAG_LOCAL;
681 }
682
683 /*
684 * We need a uuid for the allocation-label and dimm(s) on which
685 * to store the label.
686 */
687 if (!uuid || nd_region->ndr_mappings == 0)
688 return -ENXIO;
689
690 div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
691 if (remainder) {
692 dev_dbg(dev, "%llu is not %dK aligned\n", val,
693 (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
694 return -EINVAL;
695 }
696
697 nd_label_gen_id(&label_id, uuid, flags);
698 for (i = 0; i < nd_region->ndr_mappings; i++) {
699 nd_mapping = &nd_region->mapping[i];
700 ndd = to_ndd(nd_mapping);
701
702 /*
703 * All dimms in an interleave set, or the base dimm for a blk
704 * region, need to be enabled for the size to be changed.
705 */
706 if (!ndd)
707 return -ENXIO;
708
709 allocated += nvdimm_allocated_dpa(ndd, &label_id);
710 }
711 available = nd_region_available_dpa(nd_region);
712
713 if (val > available + allocated)
714 return -ENOSPC;
715
716 if (val == allocated)
717 return 0;
718
719 val = div_u64(val, nd_region->ndr_mappings);
720 allocated = div_u64(allocated, nd_region->ndr_mappings);
721 if (val < allocated)
722 rc = shrink_dpa_allocation(nd_region, &label_id,
723 allocated - val);
724 else
725 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
726
727 if (rc)
728 return rc;
729
730 if (is_namespace_pmem(dev)) {
731 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
732
733 nd_namespace_pmem_set_size(nd_region, nspm,
734 val * nd_region->ndr_mappings);
735 } else if (is_namespace_blk(dev)) {
736 /*
737 * Try to delete the namespace if we deleted all of its
738 * allocation and this is not the seed device for the
739 * region.
740 */
741 if (val == 0 && nd_region->ns_seed != dev)
742 nd_device_unregister(dev, ND_ASYNC);
743 }
744
745 return rc;
746}
747
748static ssize_t size_store(struct device *dev,
749 struct device_attribute *attr, const char *buf, size_t len)
750{
751 struct nd_region *nd_region = to_nd_region(dev->parent);
752 unsigned long long val;
753 u8 **uuid = NULL;
754 int rc;
755
756 rc = kstrtoull(buf, 0, &val);
757 if (rc)
758 return rc;
759
760 device_lock(dev);
761 nvdimm_bus_lock(dev);
762 wait_nvdimm_bus_probe_idle(dev);
763 rc = __size_store(dev, val);
764 if (rc >= 0)
765 rc = nd_namespace_label_update(nd_region, dev);
766
767 if (is_namespace_pmem(dev)) {
768 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
769
770 uuid = &nspm->uuid;
771 } else if (is_namespace_blk(dev)) {
772 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
773
774 uuid = &nsblk->uuid;
775 }
776
777 if (rc == 0 && val == 0 && uuid) {
778 /* setting size zero == 'delete namespace' */
779 kfree(*uuid);
780 *uuid = NULL;
781 }
782
783 dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
784 ? "fail" : "success", rc);
785
786 nvdimm_bus_unlock(dev);
787 device_unlock(dev);
788
789 return rc < 0 ? rc : len;
790}
791
792static ssize_t size_show(struct device *dev,
793 struct device_attribute *attr, char *buf)
794{
795 unsigned long long size = 0;
796
797 nvdimm_bus_lock(dev);
798 if (is_namespace_pmem(dev)) {
799 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
800
801 size = resource_size(&nspm->nsio.res);
802 } else if (is_namespace_blk(dev)) {
803 size = nd_namespace_blk_size(to_nd_namespace_blk(dev));
804 } else if (is_namespace_io(dev)) {
805 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
806
807 size = resource_size(&nsio->res);
808 }
809 nvdimm_bus_unlock(dev);
810
811 return sprintf(buf, "%llu\n", size);
812}
813static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
814
815static ssize_t uuid_show(struct device *dev,
816 struct device_attribute *attr, char *buf)
817{
818 u8 *uuid;
819
820 if (is_namespace_pmem(dev)) {
821 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
822
823 uuid = nspm->uuid;
824 } else if (is_namespace_blk(dev)) {
825 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
826
827 uuid = nsblk->uuid;
828 } else
829 return -ENXIO;
830
831 if (uuid)
832 return sprintf(buf, "%pUb\n", uuid);
833 return sprintf(buf, "\n");
834}
835
836/**
837 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
838 * @nd_region: parent region so we can updates all dimms in the set
839 * @dev: namespace type for generating label_id
840 * @new_uuid: incoming uuid
841 * @old_uuid: reference to the uuid storage location in the namespace object
842 */
843static int namespace_update_uuid(struct nd_region *nd_region,
844 struct device *dev, u8 *new_uuid, u8 **old_uuid)
845{
846 u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
847 struct nd_label_id old_label_id;
848 struct nd_label_id new_label_id;
849 int i;
850
851 if (!nd_is_uuid_unique(dev, new_uuid))
852 return -EINVAL;
853
854 if (*old_uuid == NULL)
855 goto out;
856
857 /*
858 * If we've already written a label with this uuid, then it's
859 * too late to rename because we can't reliably update the uuid
860 * without losing the old namespace. Userspace must delete this
861 * namespace to abandon the old uuid.
862 */
863 for (i = 0; i < nd_region->ndr_mappings; i++) {
864 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
865
866 /*
867 * This check by itself is sufficient because old_uuid
868 * would be NULL above if this uuid did not exist in the
869 * currently written set.
870 *
871 * FIXME: can we delete uuid with zero dpa allocated?
872 */
873 if (nd_mapping->labels)
874 return -EBUSY;
875 }
876
877 nd_label_gen_id(&old_label_id, *old_uuid, flags);
878 nd_label_gen_id(&new_label_id, new_uuid, flags);
879 for (i = 0; i < nd_region->ndr_mappings; i++) {
880 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
881 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
882 struct resource *res;
883
884 for_each_dpa_resource(ndd, res)
885 if (strcmp(res->name, old_label_id.id) == 0)
886 sprintf((void *) res->name, "%s",
887 new_label_id.id);
888 }
889 kfree(*old_uuid);
890 out:
891 *old_uuid = new_uuid;
892 return 0;
893}
894
895static ssize_t uuid_store(struct device *dev,
896 struct device_attribute *attr, const char *buf, size_t len)
897{
898 struct nd_region *nd_region = to_nd_region(dev->parent);
899 u8 *uuid = NULL;
900 u8 **ns_uuid;
901 ssize_t rc;
902
903 if (is_namespace_pmem(dev)) {
904 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
905
906 ns_uuid = &nspm->uuid;
907 } else if (is_namespace_blk(dev)) {
908 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
909
910 ns_uuid = &nsblk->uuid;
911 } else
912 return -ENXIO;
913
914 device_lock(dev);
915 nvdimm_bus_lock(dev);
916 wait_nvdimm_bus_probe_idle(dev);
917 rc = nd_uuid_store(dev, &uuid, buf, len);
918 if (rc >= 0)
919 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
920 if (rc >= 0)
921 rc = nd_namespace_label_update(nd_region, dev);
922 else
923 kfree(uuid);
924 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
925 rc, buf, buf[len - 1] == '\n' ? "" : "\n");
926 nvdimm_bus_unlock(dev);
927 device_unlock(dev);
928
929 return rc < 0 ? rc : len;
930}
931static DEVICE_ATTR_RW(uuid);
932
933static ssize_t resource_show(struct device *dev,
934 struct device_attribute *attr, char *buf)
935{
936 struct resource *res;
937
938 if (is_namespace_pmem(dev)) {
939 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
940
941 res = &nspm->nsio.res;
942 } else if (is_namespace_io(dev)) {
943 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
944
945 res = &nsio->res;
946 } else
947 return -ENXIO;
948
949 /* no address to convey if the namespace has no allocation */
950 if (resource_size(res) == 0)
951 return -ENXIO;
952 return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
953}
954static DEVICE_ATTR_RO(resource);
955
956static const unsigned long ns_lbasize_supported[] = { 512, 0 };
957
958static ssize_t sector_size_show(struct device *dev,
959 struct device_attribute *attr, char *buf)
960{
961 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
962
963 if (!is_namespace_blk(dev))
964 return -ENXIO;
965
966 return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
967}
968
969static ssize_t sector_size_store(struct device *dev,
970 struct device_attribute *attr, const char *buf, size_t len)
971{
972 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
973 struct nd_region *nd_region = to_nd_region(dev->parent);
974 ssize_t rc;
975
976 if (!is_namespace_blk(dev))
977 return -ENXIO;
978
979 device_lock(dev);
980 nvdimm_bus_lock(dev);
981 rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
982 ns_lbasize_supported);
983 if (rc >= 0)
984 rc = nd_namespace_label_update(nd_region, dev);
985 dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
986 rc, rc < 0 ? "tried" : "wrote", buf,
987 buf[len - 1] == '\n' ? "" : "\n");
988 nvdimm_bus_unlock(dev);
989 device_unlock(dev);
990
991 return rc ? rc : len;
992}
993static DEVICE_ATTR_RW(sector_size);
994
995static ssize_t dpa_extents_show(struct device *dev,
996 struct device_attribute *attr, char *buf)
997{
998 struct nd_region *nd_region = to_nd_region(dev->parent);
999 struct nd_label_id label_id;
1000 int count = 0, i;
1001 u8 *uuid = NULL;
1002 u32 flags = 0;
1003
1004 nvdimm_bus_lock(dev);
1005 if (is_namespace_pmem(dev)) {
1006 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1007
1008 uuid = nspm->uuid;
1009 flags = 0;
1010 } else if (is_namespace_blk(dev)) {
1011 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1012
1013 uuid = nsblk->uuid;
1014 flags = NSLABEL_FLAG_LOCAL;
1015 }
1016
1017 if (!uuid)
1018 goto out;
1019
1020 nd_label_gen_id(&label_id, uuid, flags);
1021 for (i = 0; i < nd_region->ndr_mappings; i++) {
1022 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1023 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1024 struct resource *res;
1025
1026 for_each_dpa_resource(ndd, res)
1027 if (strcmp(res->name, label_id.id) == 0)
1028 count++;
1029 }
1030 out:
1031 nvdimm_bus_unlock(dev);
1032
1033 return sprintf(buf, "%d\n", count);
1034}
1035static DEVICE_ATTR_RO(dpa_extents);
1036
1037static struct attribute *nd_namespace_attributes[] = {
1038 &dev_attr_nstype.attr,
1039 &dev_attr_size.attr,
1040 &dev_attr_uuid.attr,
1041 &dev_attr_resource.attr,
1042 &dev_attr_alt_name.attr,
1043 &dev_attr_sector_size.attr,
1044 &dev_attr_dpa_extents.attr,
1045 NULL,
1046};
1047
1048static umode_t namespace_visible(struct kobject *kobj,
1049 struct attribute *a, int n)
1050{
1051 struct device *dev = container_of(kobj, struct device, kobj);
1052
1053 if (a == &dev_attr_resource.attr) {
1054 if (is_namespace_blk(dev))
1055 return 0;
1056 return a->mode;
1057 }
1058
1059 if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
1060 if (a == &dev_attr_size.attr)
1061 return S_IWUSR | S_IRUGO;
1062
1063 if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
1064 return 0;
1065
1066 return a->mode;
1067 }
1068
1069 if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr)
1070 return a->mode;
1071
1072 return 0;
1073}
1074
1075static struct attribute_group nd_namespace_attribute_group = {
1076 .attrs = nd_namespace_attributes,
1077 .is_visible = namespace_visible,
1078};
1079
1080static const struct attribute_group *nd_namespace_attribute_groups[] = {
1081 &nd_device_attribute_group,
1082 &nd_namespace_attribute_group,
1083 NULL,
1084};
1085
1086static struct device **create_namespace_io(struct nd_region *nd_region)
1087{
1088 struct nd_namespace_io *nsio;
1089 struct device *dev, **devs;
1090 struct resource *res;
1091
1092 nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1093 if (!nsio)
1094 return NULL;
1095
1096 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1097 if (!devs) {
1098 kfree(nsio);
1099 return NULL;
1100 }
1101
1102 dev = &nsio->dev;
1103 dev->type = &namespace_io_device_type;
1104 dev->parent = &nd_region->dev;
1105 res = &nsio->res;
1106 res->name = dev_name(&nd_region->dev);
1107 res->flags = IORESOURCE_MEM;
1108 res->start = nd_region->ndr_start;
1109 res->end = res->start + nd_region->ndr_size - 1;
1110
1111 devs[0] = dev;
1112 return devs;
1113}
1114
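/*
 * Return true if a dimm in the interleave set identified by @cookie has
 * a label for @uuid at position @pos.
 */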
1115static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1116 u64 cookie, u16 pos)
1117{
1118 struct nd_namespace_label *found = NULL;
1119 int i;
1120
1121 for (i = 0; i < nd_region->ndr_mappings; i++) {
1122 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1123 struct nd_namespace_label *nd_label;
1124 bool found_uuid = false;
1125 int l;
1126
1127 for_each_label(l, nd_label, nd_mapping->labels) {
1128 u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1129 u16 position = __le16_to_cpu(nd_label->position);
1130 u16 nlabel = __le16_to_cpu(nd_label->nlabel);
1131
1132 if (isetcookie != cookie)
1133 continue;
1134
1135 if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1136 continue;
1137
1138 if (found_uuid) {
1139 dev_dbg(to_ndd(nd_mapping)->dev,
1140 "%s duplicate entry for uuid\n",
1141 __func__);
1142 return false;
1143 }
1144 found_uuid = true;
1145 if (nlabel != nd_region->ndr_mappings)
1146 continue;
1147 if (position != pos)
1148 continue;
1149 found = nd_label;
1150 break;
1151 }
1152 if (found)
1153 break;
1154 }
1155 return found != NULL;
1156}
1157
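/*
 * Reduce each mapping's label list to the single label matching
 * @pmem_id, after checking it against the dpa range published in NFIT.
 */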
1158static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1159{
1160 struct nd_namespace_label *select = NULL;
1161 int i;
1162
1163 if (!pmem_id)
1164 return -ENODEV;
1165
1166 for (i = 0; i < nd_region->ndr_mappings; i++) {
1167 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1168 struct nd_namespace_label *nd_label;
1169 u64 hw_start, hw_end, pmem_start, pmem_end;
1170 int l;
1171
1172 for_each_label(l, nd_label, nd_mapping->labels)
1173 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1174 break;
1175
1176 if (!nd_label) {
1177 WARN_ON(1);
1178 return -EINVAL;
1179 }
1180
1181 select = nd_label;
1182 /*
1183 * Check that this label is compliant with the dpa
1184 * range published in NFIT
1185 */
1186 hw_start = nd_mapping->start;
1187 hw_end = hw_start + nd_mapping->size;
1188 pmem_start = __le64_to_cpu(select->dpa);
1189 pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
1190 if (pmem_start == hw_start && pmem_end <= hw_end)
1191 /* pass */;
1192 else
1193 return -EINVAL;
1194
1195 nd_mapping->labels[0] = select;
1196 nd_mapping->labels[1] = NULL;
1197 }
1198 return 0;
1199}
1200
1201/**
1202 * find_pmem_label_set - validate interleave set labelling, retrieve label0
1203 * @nd_region: region with mappings to validate
1204 */
1205static int find_pmem_label_set(struct nd_region *nd_region,
1206 struct nd_namespace_pmem *nspm)
1207{
1208 u64 cookie = nd_region_interleave_set_cookie(nd_region);
1209 struct nd_namespace_label *nd_label;
1210 u8 select_id[NSLABEL_UUID_LEN];
1211 resource_size_t size = 0;
1212 u8 *pmem_id = NULL;
1213 int rc = -ENODEV, l;
1214 u16 i;
1215
1216 if (cookie == 0)
1217 return -ENXIO;
1218
1219 /*
1220 * Find a complete set of labels by uuid. By definition we can start
1221 * with any mapping as the reference label
1222 */
1223 for_each_label(l, nd_label, nd_region->mapping[0].labels) {
1224 u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1225
1226 if (isetcookie != cookie)
1227 continue;
1228
1229 for (i = 0; i < nd_region->ndr_mappings; i++)
1230 if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1231 cookie, i))
1232 break;
1233 if (i < nd_region->ndr_mappings) {
1234 /*
1235 * Give up if we don't find an instance of a
1236 * uuid at each position (from 0 to
1237 * nd_region->ndr_mappings - 1), or if we find a
1238 * dimm with two instances of the same uuid.
1239 */
1240 rc = -EINVAL;
1241 goto err;
1242 } else if (pmem_id) {
1243 /*
1244 * If there is more than one valid uuid set, we
1245 * need userspace to clean this up.
1246 */
1247 rc = -EBUSY;
1248 goto err;
1249 }
1250 memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1251 pmem_id = select_id;
1252 }
1253
1254 /*
1255 * Fix up each mapping's 'labels' to have the validated pmem label for
1256 * that position at labels[0], and NULL at labels[1]. In the process,
1257 * check that the namespace aligns with interleave-set. We know
1258 * that it does not overlap with any blk namespaces by virtue of
1259 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1260 * succeeded).
1261 */
1262 rc = select_pmem_id(nd_region, pmem_id);
1263 if (rc)
1264 goto err;
1265
1266 /* Calculate total size and populate namespace properties from label0 */
1267 for (i = 0; i < nd_region->ndr_mappings; i++) {
1268 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1269 struct nd_namespace_label *label0 = nd_mapping->labels[0];
1270
1271 size += __le64_to_cpu(label0->rawsize);
1272 if (__le16_to_cpu(label0->position) != 0)
1273 continue;
1274 WARN_ON(nspm->alt_name || nspm->uuid);
1275 nspm->alt_name = kmemdup((void __force *) label0->name,
1276 NSLABEL_NAME_LEN, GFP_KERNEL);
1277 nspm->uuid = kmemdup((void __force *) label0->uuid,
1278 NSLABEL_UUID_LEN, GFP_KERNEL);
1279 }
1280
1281 if (!nspm->alt_name || !nspm->uuid) {
1282 rc = -ENOMEM;
1283 goto err;
1284 }
1285
1286 nd_namespace_pmem_set_size(nd_region, nspm, size);
1287
1288 return 0;
1289 err:
1290 switch (rc) {
1291 case -EINVAL:
1292 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1293 break;
1294 case -ENODEV:
1295 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1296 break;
1297 default:
1298 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1299 __func__, rc);
1300 break;
1301 }
1302 return rc;
1303}
1304
1305static struct device **create_namespace_pmem(struct nd_region *nd_region)
1306{
1307 struct nd_namespace_pmem *nspm;
1308 struct device *dev, **devs;
1309 struct resource *res;
1310 int rc;
1311
1312 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1313 if (!nspm)
1314 return NULL;
1315
1316 dev = &nspm->nsio.dev;
1317 dev->type = &namespace_pmem_device_type;
1318 dev->parent = &nd_region->dev;
1319 res = &nspm->nsio.res;
1320 res->name = dev_name(&nd_region->dev);
1321 res->flags = IORESOURCE_MEM;
1322 rc = find_pmem_label_set(nd_region, nspm);
1323 if (rc == -ENODEV) {
1324 int i;
1325
1326 /* Pass, try to permit namespace creation... */
1327 for (i = 0; i < nd_region->ndr_mappings; i++) {
1328 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1329
1330 kfree(nd_mapping->labels);
1331 nd_mapping->labels = NULL;
1332 }
1333
1334 /* Publish a zero-sized namespace for userspace to configure. */
1335 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1336
1337 rc = 0;
1338 } else if (rc)
1339 goto err;
1340
1341 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1342 if (!devs)
1343 goto err;
1344
1345 devs[0] = dev;
1346 return devs;
1347
1348 err:
1349 namespace_pmem_release(&nspm->nsio.dev);
1350 return NULL;
1351}
1352
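/*
 * Append the dpa resource starting at @start for this blk namespace to
 * the nsblk->res array.
 */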
1353struct resource *nsblk_add_resource(struct nd_region *nd_region,
1354 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1355 resource_size_t start)
1356{
1357 struct nd_label_id label_id;
1358 struct resource *res;
1359
1360 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1361 res = krealloc(nsblk->res,
1362 sizeof(void *) * (nsblk->num_resources + 1),
1363 GFP_KERNEL);
1364 if (!res)
1365 return NULL;
1366 nsblk->res = (struct resource **) res;
1367 for_each_dpa_resource(ndd, res)
1368 if (strcmp(res->name, label_id.id) == 0
1369 && res->start == start) {
1370 nsblk->res[nsblk->num_resources++] = res;
1371 return res;
1372 }
1373 return NULL;
1374}
1375
1376static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1377{
1378 struct nd_namespace_blk *nsblk;
1379 struct device *dev;
1380
1381 if (!is_nd_blk(&nd_region->dev))
1382 return NULL;
1383
1384 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1385 if (!nsblk)
1386 return NULL;
1387
1388 dev = &nsblk->dev;
1389 dev->type = &namespace_blk_device_type;
1390 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1391 if (nsblk->id < 0) {
1392 kfree(nsblk);
1393 return NULL;
1394 }
1395 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1396 dev->parent = &nd_region->dev;
1397 dev->groups = nd_namespace_attribute_groups;
1398
1399 return &nsblk->dev;
1400}
1401
1402void nd_region_create_blk_seed(struct nd_region *nd_region)
1403{
1404 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1405 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1406 /*
1407 * Seed creation failures are not fatal, provisioning is simply
1408 * disabled until memory becomes available
1409 */
1410 if (!nd_region->ns_seed)
1411 dev_err(&nd_region->dev, "failed to create blk namespace\n");
1412 else
1413 nd_device_register(nd_region->ns_seed);
1414}
1415
1416static struct device **create_namespace_blk(struct nd_region *nd_region)
1417{
1418 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1419 struct nd_namespace_label *nd_label;
1420 struct device *dev, **devs = NULL;
1421 struct nd_namespace_blk *nsblk;
1422 struct nvdimm_drvdata *ndd;
1423 int i, l, count = 0;
1424 struct resource *res;
1425
1426 if (nd_region->ndr_mappings == 0)
1427 return NULL;
1428
1429 ndd = to_ndd(nd_mapping);
1430 for_each_label(l, nd_label, nd_mapping->labels) {
1431 u32 flags = __le32_to_cpu(nd_label->flags);
1432 char name[NSLABEL_NAME_LEN];
1433 struct device **__devs;
1434
1435 if (flags & NSLABEL_FLAG_LOCAL)
1436 /* pass */;
1437 else
1438 continue;
1439
1440 for (i = 0; i < count; i++) {
1441 nsblk = to_nd_namespace_blk(devs[i]);
1442 if (memcmp(nsblk->uuid, nd_label->uuid,
1443 NSLABEL_UUID_LEN) == 0) {
1444 res = nsblk_add_resource(nd_region, ndd, nsblk,
1445 __le64_to_cpu(nd_label->dpa));
1446 if (!res)
1447 goto err;
1448 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1449 dev_name(&nsblk->dev));
1450 break;
1451 }
1452 }
1453 if (i < count)
1454 continue;
1455 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1456 if (!__devs)
1457 goto err;
1458 memcpy(__devs, devs, sizeof(dev) * count);
1459 kfree(devs);
1460 devs = __devs;
1461
1462 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1463 if (!nsblk)
1464 goto err;
1465 dev = &nsblk->dev;
1466 dev->type = &namespace_blk_device_type;
1467 dev->parent = &nd_region->dev;
1468 dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
1469 devs[count++] = dev;
1470 nsblk->id = -1;
1471 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1472 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1473 GFP_KERNEL);
1474 if (!nsblk->uuid)
1475 goto err;
1476 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1477 if (name[0])
1478 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1479 GFP_KERNEL);
1480 res = nsblk_add_resource(nd_region, ndd, nsblk,
1481 __le64_to_cpu(nd_label->dpa));
1482 if (!res)
1483 goto err;
1484 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1485 dev_name(&nsblk->dev));
1486 }
1487
1488 dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
1489 __func__, count, count == 1 ? "" : "s");
1490
1491 if (count == 0) {
1492 /* Publish a zero-sized namespace for userspace to configure. */
1493 for (i = 0; i < nd_region->ndr_mappings; i++) {
1494 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1495
1496 kfree(nd_mapping->labels);
1497 nd_mapping->labels = NULL;
1498 }
1499
1500 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1501 if (!devs)
1502 goto err;
1503 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1504 if (!nsblk)
1505 goto err;
1506 dev = &nsblk->dev;
1507 dev->type = &namespace_blk_device_type;
1508 dev->parent = &nd_region->dev;
1509 devs[count++] = dev;
1510 }
1511
1512 return devs;
1513
1514err:
1515 for (i = 0; i < count; i++) {
1516 nsblk = to_nd_namespace_blk(devs[i]);
1517 namespace_blk_release(&nsblk->dev);
1518 }
1519 kfree(devs);
1520 return NULL;
1521}
1522
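/*
 * Pin each dimm in the region and cache its active label entries in
 * nd_mapping->labels for namespace scanning.
 */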
1523static int init_active_labels(struct nd_region *nd_region)
1524{
1525 int i;
1526
1527 for (i = 0; i < nd_region->ndr_mappings; i++) {
1528 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1529 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1530 struct nvdimm *nvdimm = nd_mapping->nvdimm;
1531 int count, j;
1532
1533 /*
1534 * If the dimm is disabled then prevent the region from
1535 * being activated if it aliases DPA.
1536 */
1537 if (!ndd) {
1538 if ((nvdimm->flags & NDD_ALIASING) == 0)
1539 return 0;
1540 dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
1541 dev_name(&nd_mapping->nvdimm->dev));
1542 return -ENXIO;
1543 }
1544 nd_mapping->ndd = ndd;
1545 atomic_inc(&nvdimm->busy);
1546 get_ndd(ndd);
1547
1548 count = nd_label_active_count(ndd);
1549 dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
1550 if (!count)
1551 continue;
1552 nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
1553 GFP_KERNEL);
1554 if (!nd_mapping->labels)
1555 return -ENOMEM;
1556 for (j = 0; j < count; j++) {
1557 struct nd_namespace_label *label;
1558
1559 label = nd_label_active(ndd, j);
1560 nd_mapping->labels[j] = label;
1561 }
1562 }
1563
1564 return 0;
1565}
1566
1567int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
1568{
1569 struct device **devs = NULL;
1570 int i, rc = 0, type;
1571
1572 *err = 0;
1573 nvdimm_bus_lock(&nd_region->dev);
1574 rc = init_active_labels(nd_region);
1575 if (rc) {
1576 nvdimm_bus_unlock(&nd_region->dev);
1577 return rc;
1578 }
1579
1580 type = nd_region_to_nstype(nd_region);
1581 switch (type) {
1582 case ND_DEVICE_NAMESPACE_IO:
1583 devs = create_namespace_io(nd_region);
1584 break;
1585 case ND_DEVICE_NAMESPACE_PMEM:
1586 devs = create_namespace_pmem(nd_region);
1587 break;
1588 case ND_DEVICE_NAMESPACE_BLK:
1589 devs = create_namespace_blk(nd_region);
1590 break;
1591 default:
1592 break;
1593 }
1594 nvdimm_bus_unlock(&nd_region->dev);
1595
1596 if (!devs)
1597 return -ENODEV;
1598
1599 for (i = 0; devs[i]; i++) {
1600 struct device *dev = devs[i];
1601 int id;
1602
1603 if (type == ND_DEVICE_NAMESPACE_BLK) {
1604 struct nd_namespace_blk *nsblk;
1605
1606 nsblk = to_nd_namespace_blk(dev);
1607 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
1608 GFP_KERNEL);
1609 nsblk->id = id;
1610 } else
1611 id = i;
1612
1613 if (id < 0)
1614 break;
1615 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
1616 dev->groups = nd_namespace_attribute_groups;
1617 nd_device_register(dev);
1618 }
1619 if (i)
1620 nd_region->ns_seed = devs[0];
1621
1622 if (devs[i]) {
1623 int j;
1624
1625 for (j = i; devs[j]; j++) {
1626 struct device *dev = devs[j];
1627
1628 device_initialize(dev);
1629 put_device(dev);
1630 }
1631 *err = j - i;
1632 /*
1633 * All of the namespaces we tried to register failed, so
1634 * fail region activation.
1635 */
1636 if (*err == 0)
1637 rc = -ENODEV;
1638 }
1639 kfree(devs);
1640
1641 if (rc == -ENODEV)
1642 return rc;
1643
1644 return i;
1645}