Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-block.git] / drivers / nvdimm / label.c
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/device.h>
14 #include <linux/ndctl.h>
15 #include <linux/uuid.h>
16 #include <linux/slab.h>
17 #include <linux/io.h>
18 #include <linux/nd.h>
19 #include "nd-core.h"
20 #include "label.h"
21 #include "nd.h"
22
23 static guid_t nvdimm_btt_guid;
24 static guid_t nvdimm_btt2_guid;
25 static guid_t nvdimm_pfn_guid;
26 static guid_t nvdimm_dax_guid;
27
28 static u32 best_seq(u32 a, u32 b)
29 {
30         a &= NSINDEX_SEQ_MASK;
31         b &= NSINDEX_SEQ_MASK;
32
33         if (a == 0 || a == b)
34                 return b;
35         else if (b == 0)
36                 return a;
37         else if (nd_inc_seq(a) == b)
38                 return b;
39         else
40                 return a;
41 }
42
/* Size in bytes of an on-media namespace label for this dimm (see nd_label_validate()). */
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
        return ndd->nslabel_size;
}
47
/*
 * Number of label slots the config area can hold, budgeting one extra
 * byte of index overhead per label (hence the "+ 1").
 */
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
        return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
}
52
53 size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
54 {
55         u32 nslot, space, size;
56
57         /*
58          * The minimum index space is 512 bytes, with that amount of
59          * index we can describe ~1400 labels which is less than a byte
60          * of overhead per label.  Round up to a byte of overhead per
61          * label and determine the size of the index region.  Yes, this
62          * starts to waste space at larger config_sizes, but it's
63          * unlikely we'll ever see anything but 128K.
64          */
65         nslot = nvdimm_num_label_slots(ndd);
66         space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
67         size = ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
68                         NSINDEX_ALIGN) * 2;
69         if (size <= space)
70                 return size / 2;
71
72         dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
73                         ndd->nsarea.config_size, sizeof_namespace_label(ndd));
74         return 0;
75 }
76
/*
 * Probe both on-media index blocks and return the index (0 or 1) of the
 * one to treat as current, or -1 if neither passes validation.  Assumes
 * ndd->nslabel_size already holds the candidate label size (see
 * nd_label_validate()).
 */
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
        /*
         * On media label format consists of two index blocks followed
         * by an array of labels.  None of these structures are ever
         * updated in place.  A sequence number tracks the current
         * active index and the next one to write, while labels are
         * written to free slots.
         *
         *     +------------+
         *     |            |
         *     |  nsindex0  |
         *     |            |
         *     +------------+
         *     |            |
         *     |  nsindex1  |
         *     |            |
         *     +------------+
         *     |   label0   |
         *     +------------+
         *     |   label1   |
         *     +------------+
         *     |            |
         *      ....nslot...
         *     |            |
         *     +------------+
         *     |   labelN   |
         *     +------------+
         */
        struct nd_namespace_index *nsindex[] = {
                to_namespace_index(ndd, 0),
                to_namespace_index(ndd, 1),
        };
        const int num_index = ARRAY_SIZE(nsindex);
        struct device *dev = ndd->dev;
        bool valid[2] = { 0 };
        int i, num_valid = 0;
        u32 seq;

        for (i = 0; i < num_index; i++) {
                u32 nslot;
                u8 sig[NSINDEX_SIG_LEN];
                u64 sum_save, sum, size;
                unsigned int version, labelsize;

                memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
                if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
                        dev_dbg(dev, "%s: nsindex%d signature invalid\n",
                                        __func__, i);
                        continue;
                }

                /* label sizes larger than 128 arrived with v1.2 */
                version = __le16_to_cpu(nsindex[i]->major) * 100
                        + __le16_to_cpu(nsindex[i]->minor);
                if (version >= 102)
                        labelsize = 1 << (7 + nsindex[i]->labelsize);
                else
                        labelsize = 128;

                if (labelsize != sizeof_namespace_label(ndd)) {
                        dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n",
                                        __func__, i, nsindex[i]->labelsize);
                        continue;
                }

                /* checksum is calculated with the checksum field zeroed */
                sum_save = __le64_to_cpu(nsindex[i]->checksum);
                nsindex[i]->checksum = __cpu_to_le64(0);
                sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
                nsindex[i]->checksum = __cpu_to_le64(sum_save);
                if (sum != sum_save) {
                        dev_dbg(dev, "%s: nsindex%d checksum invalid\n",
                                        __func__, i);
                        continue;
                }

                /* sequence number 0 is never valid */
                seq = __le32_to_cpu(nsindex[i]->seq);
                if ((seq & NSINDEX_SEQ_MASK) == 0) {
                        dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n",
                                        __func__, i, seq);
                        continue;
                }

                /* sanity check the index against expected values */
                if (__le64_to_cpu(nsindex[i]->myoff)
                                != i * sizeof_namespace_index(ndd)) {
                        dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n",
                                        __func__, i, (unsigned long long)
                                        __le64_to_cpu(nsindex[i]->myoff));
                        continue;
                }
                if (__le64_to_cpu(nsindex[i]->otheroff)
                                != (!i) * sizeof_namespace_index(ndd)) {
                        dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n",
                                        __func__, i, (unsigned long long)
                                        __le64_to_cpu(nsindex[i]->otheroff));
                        continue;
                }

                size = __le64_to_cpu(nsindex[i]->mysize);
                if (size > sizeof_namespace_index(ndd)
                                || size < sizeof(struct nd_namespace_index)) {
                        dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n",
                                        __func__, i, size);
                        continue;
                }

                /* the advertised slot count must fit in the config area */
                nslot = __le32_to_cpu(nsindex[i]->nslot);
                if (nslot * sizeof_namespace_label(ndd)
                                + 2 * sizeof_namespace_index(ndd)
                                > ndd->nsarea.config_size) {
                        dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
                                        __func__, i, nslot,
                                        ndd->nsarea.config_size);
                        continue;
                }
                valid[i] = true;
                num_valid++;
        }

        switch (num_valid) {
        case 0:
                break;
        case 1:
                for (i = 0; i < num_index; i++)
                        if (valid[i])
                                return i;
                /* can't have num_valid > 0 but valid[] = { false, false } */
                WARN_ON(1);
                break;
        default:
                /* pick the best index... */
                seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
                                __le32_to_cpu(nsindex[1]->seq));
                if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
                        return 1;
                else
                        return 0;
                break;
        }

        return -1;
}
220
221 int nd_label_validate(struct nvdimm_drvdata *ndd)
222 {
223         /*
224          * In order to probe for and validate namespace index blocks we
225          * need to know the size of the labels, and we can't trust the
226          * size of the labels until we validate the index blocks.
227          * Resolve this dependency loop by probing for known label
228          * sizes, but default to v1.2 256-byte namespace labels if
229          * discovery fails.
230          */
231         int label_size[] = { 128, 256 };
232         int i, rc;
233
234         for (i = 0; i < ARRAY_SIZE(label_size); i++) {
235                 ndd->nslabel_size = label_size[i];
236                 rc = __nd_label_validate(ndd);
237                 if (rc >= 0)
238                         return rc;
239         }
240
241         return -1;
242 }
243
/* Copy one namespace index block over another; no-op if either is NULL. */
void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}
254
/* Start of the label array: just past the two leading index blocks. */
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	char *cfg = (char *) to_namespace_index(ndd, 0);

	return (struct nd_namespace_label *)
		(cfg + 2 * sizeof_namespace_index(ndd));
}
261
/* Convert a label pointer into its slot number within the label array. */
static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long offset = (unsigned long) nd_label
		- (unsigned long) nd_label_base(ndd);

	return offset / sizeof_namespace_label(ndd);
}
272
/* Convert a slot number into a pointer to that label in the label array. */
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long addr;

	addr = (unsigned long) nd_label_base(ndd);
	addr += sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) addr;
}
282
/* Iterate @bit over every zero bit in the little-endian bitmap @addr of @size bits. */
#define for_each_clear_bit_le(bit, addr, size) \
        for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
287
288 /**
289  * preamble_index - common variable initialization for nd_label_* routines
290  * @ndd: dimm container for the relevant label set
291  * @idx: namespace_index index
292  * @nsindex_out: on return set to the currently active namespace index
293  * @free: on return set to the free label bitmap in the index
294  * @nslot: on return set to the number of slots in the label space
295  */
296 static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
297                 struct nd_namespace_index **nsindex_out,
298                 unsigned long **free, u32 *nslot)
299 {
300         struct nd_namespace_index *nsindex;
301
302         nsindex = to_namespace_index(ndd, idx);
303         if (nsindex == NULL)
304                 return false;
305
306         *free = (unsigned long *) nsindex->free;
307         *nslot = __le32_to_cpu(nsindex->nslot);
308         *nsindex_out = nsindex;
309
310         return true;
311 }
312
313 char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
314 {
315         if (!label_id || !uuid)
316                 return NULL;
317         snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
318                         flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
319         return label_id->id;
320 }
321
/* preamble_index() against the currently active index block */
static bool preamble_current(struct nvdimm_drvdata *ndd,
                struct nd_namespace_index **nsindex,
                unsigned long **free, u32 *nslot)
{
        return preamble_index(ndd, ndd->ns_current, nsindex,
                        free, nslot);
}
329
/* preamble_index() against the next-to-write (staging) index block */
static bool preamble_next(struct nvdimm_drvdata *ndd,
                struct nd_namespace_index **nsindex,
                unsigned long **free, u32 *nslot)
{
        return preamble_index(ndd, ndd->ns_next, nsindex,
                        free, nslot);
}
337
/*
 * Validate an individual label: it must record the slot it occupies,
 * its DPA allocation must be 4K aligned, and (for label formats that
 * carry one) its fletcher64 checksum must match.
 */
static bool slot_valid(struct nvdimm_drvdata *ndd,
                struct nd_namespace_label *nd_label, u32 slot)
{
        /* check that we are written where we expect to be written */
        if (slot != __le32_to_cpu(nd_label->slot))
                return false;

        /* check that DPA allocations are page aligned */
        if ((__le64_to_cpu(nd_label->dpa)
                                | __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
                return false;

        /* check checksum */
        if (namespace_label_has(ndd, checksum)) {
                u64 sum, sum_save;

                /* checksum is calculated with the checksum field zeroed */
                sum_save = __le64_to_cpu(nd_label->checksum);
                nd_label->checksum = __cpu_to_le64(0);
                sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
                nd_label->checksum = __cpu_to_le64(sum_save);
                if (sum != sum_save) {
                        dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
                                __func__, slot, sum);
                        return false;
                }
        }

        return true;
}
367
/*
 * Walk the active labels in the current index and reserve their DPA
 * ranges in the dimm's resource tree so later allocations do not
 * collide with pre-existing namespaces.  Returns 0 on success, -EBUSY
 * when a label's range could not be reserved.
 */
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
        struct nd_namespace_index *nsindex;
        unsigned long *free;
        u32 nslot, slot;

        if (!preamble_current(ndd, &nsindex, &free, &nslot))
                return 0; /* no label, nothing to reserve */

        /* a clear bit in the 'free' map marks an in-use label slot */
        for_each_clear_bit_le(slot, free, nslot) {
                struct nd_namespace_label *nd_label;
                struct nd_region *nd_region = NULL;
                u8 label_uuid[NSLABEL_UUID_LEN];
                struct nd_label_id label_id;
                struct resource *res;
                u32 flags;

                nd_label = to_label(ndd, slot);

                if (!slot_valid(ndd, nd_label, slot))
                        continue;

                memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
                flags = __le32_to_cpu(nd_label->flags);
                nd_label_gen_id(&label_id, label_uuid, flags);
                res = nvdimm_allocate_dpa(ndd, &label_id,
                                __le64_to_cpu(nd_label->dpa),
                                __le64_to_cpu(nd_label->rawsize));
                nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
                if (!res)
                        return -EBUSY;
        }

        return 0;
}
403
/*
 * Count the valid labels in the currently active index.  Slots that
 * fail slot_valid() are logged and skipped.
 */
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
        struct nd_namespace_index *nsindex;
        unsigned long *free;
        u32 nslot, slot;
        int count = 0;

        if (!preamble_current(ndd, &nsindex, &free, &nslot))
                return 0;

        for_each_clear_bit_le(slot, free, nslot) {
                struct nd_namespace_label *nd_label;

                nd_label = to_label(ndd, slot);

                if (!slot_valid(ndd, nd_label, slot)) {
                        u32 label_slot = __le32_to_cpu(nd_label->slot);
                        u64 size = __le64_to_cpu(nd_label->rawsize);
                        u64 dpa = __le64_to_cpu(nd_label->dpa);

                        dev_dbg(ndd->dev,
                                "%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
                                        __func__, slot, label_slot, dpa, size);
                        continue;
                }
                count++;
        }
        return count;
}
433
434 struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
435 {
436         struct nd_namespace_index *nsindex;
437         unsigned long *free;
438         u32 nslot, slot;
439
440         if (!preamble_current(ndd, &nsindex, &free, &nslot))
441                 return NULL;
442
443         for_each_clear_bit_le(slot, free, nslot) {
444                 struct nd_namespace_label *nd_label;
445
446                 nd_label = to_label(ndd, slot);
447                 if (!slot_valid(ndd, nd_label, slot))
448                         continue;
449
450                 if (n-- == 0)
451                         return to_label(ndd, slot);
452         }
453
454         return NULL;
455 }
456
/*
 * Allocate a free label slot from the 'next' (staging) index.  Returns
 * the slot number, or UINT_MAX when no index or no free slot exists.
 */
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
        struct nd_namespace_index *nsindex;
        unsigned long *free;
        u32 nslot, slot;

        if (!preamble_next(ndd, &nsindex, &free, &nslot))
                return UINT_MAX;

        WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

        /* a set bit marks a free slot; claim the first one */
        slot = find_next_bit_le(free, nslot, 0);
        if (slot == nslot)
                return UINT_MAX;

        clear_bit_le(slot, free);

        return slot;
}
476
477 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
478 {
479         struct nd_namespace_index *nsindex;
480         unsigned long *free;
481         u32 nslot;
482
483         if (!preamble_next(ndd, &nsindex, &free, &nslot))
484                 return false;
485
486         WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
487
488         if (slot < nslot)
489                 return !test_and_set_bit_le(slot, free);
490         return false;
491 }
492
/*
 * Number of free label slots in the 'next' index; when no index exists
 * yet, every slot is considered free.
 */
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
        struct nd_namespace_index *nsindex;
        unsigned long *free;
        u32 nslot;

        WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

        if (!preamble_next(ndd, &nsindex, &free, &nslot))
                return nvdimm_num_label_slots(ndd);

        return bitmap_weight(free, nslot);
}
506
/*
 * (Re)build and write out namespace index block @index with sequence
 * number @seq.  With ND_NSINDEX_INIT the slot count is derived from the
 * config size and the free bitmap is reset to all-free; otherwise the
 * previous slot count is retained and, on success, the current/next
 * index roles are rotated.  Returns 0 or a negative errno from the
 * config-data write.
 */
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
                unsigned long flags)
{
        struct nd_namespace_index *nsindex;
        unsigned long offset;
        u64 checksum;
        u32 nslot;
        int rc;

        nsindex = to_namespace_index(ndd, index);
        if (flags & ND_NSINDEX_INIT)
                nslot = nvdimm_num_label_slots(ndd);
        else
                nslot = __le32_to_cpu(nsindex->nslot);

        memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
        /* zero the 3-byte flags field */
        memset(&nsindex->flags, 0, 3);
        nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
        nsindex->seq = __cpu_to_le32(seq);
        offset = (unsigned long) nsindex
                - (unsigned long) to_namespace_index(ndd, 0);
        nsindex->myoff = __cpu_to_le64(offset);
        nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
        offset = (unsigned long) to_namespace_index(ndd,
                        nd_label_next_nsindex(index))
                - (unsigned long) to_namespace_index(ndd, 0);
        nsindex->otheroff = __cpu_to_le64(offset);
        offset = (unsigned long) nd_label_base(ndd)
                - (unsigned long) to_namespace_index(ndd, 0);
        nsindex->labeloff = __cpu_to_le64(offset);
        nsindex->nslot = __cpu_to_le32(nslot);
        nsindex->major = __cpu_to_le16(1);
        /* minor 2 indicates the 256-byte-label (v1.2) format */
        if (sizeof_namespace_label(ndd) < 256)
                nsindex->minor = __cpu_to_le16(1);
        else
                nsindex->minor = __cpu_to_le16(2);
        nsindex->checksum = __cpu_to_le64(0);
        if (flags & ND_NSINDEX_INIT) {
                unsigned long *free = (unsigned long *) nsindex->free;
                u32 nfree = ALIGN(nslot, BITS_PER_LONG);
                int last_bits, i;

                /* mark all slots free, then clear the alignment padding bits */
                memset(nsindex->free, 0xff, nfree / 8);
                for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
                        clear_bit_le(nslot + i, free);
        }
        /* checksum is calculated with the checksum field zeroed */
        checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
        nsindex->checksum = __cpu_to_le64(checksum);
        rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
                        nsindex, sizeof_namespace_index(ndd));
        if (rc < 0)
                return rc;

        if (flags & ND_NSINDEX_INIT)
                return 0;

        /* copy the index we just wrote to the new 'next' */
        WARN_ON(index != ndd->ns_next);
        nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
        ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
        ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
        WARN_ON(ndd->ns_current == ndd->ns_next);

        return 0;
}
572
/* Byte offset of @nd_label from the start of the label config area. */
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long base = (unsigned long) to_namespace_index(ndd, 0);

	return (unsigned long) nd_label - base;
}
579
580 enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
581 {
582         if (guid_equal(guid, &nvdimm_btt_guid))
583                 return NVDIMM_CCLASS_BTT;
584         else if (guid_equal(guid, &nvdimm_btt2_guid))
585                 return NVDIMM_CCLASS_BTT2;
586         else if (guid_equal(guid, &nvdimm_pfn_guid))
587                 return NVDIMM_CCLASS_PFN;
588         else if (guid_equal(guid, &nvdimm_dax_guid))
589                 return NVDIMM_CCLASS_DAX;
590         else if (guid_equal(guid, &guid_null))
591                 return NVDIMM_CCLASS_NONE;
592
593         return NVDIMM_CCLASS_UNKNOWN;
594 }
595
596 static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
597         guid_t *target)
598 {
599         if (claim_class == NVDIMM_CCLASS_BTT)
600                 return &nvdimm_btt_guid;
601         else if (claim_class == NVDIMM_CCLASS_BTT2)
602                 return &nvdimm_btt2_guid;
603         else if (claim_class == NVDIMM_CCLASS_PFN)
604                 return &nvdimm_pfn_guid;
605         else if (claim_class == NVDIMM_CCLASS_DAX)
606                 return &nvdimm_dax_guid;
607         else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
608                 /*
609                  * If we're modifying a namespace for which we don't
610                  * know the claim_class, don't touch the existing guid.
611                  */
612                 return target;
613         } else
614                 return &guid_null;
615 }
616
/*
 * Write an updated pmem namespace label for @nspm at interleave-set
 * position @pos into a freshly allocated slot of the staging (next)
 * index, garbage collect the label it replaces, then commit by writing
 * the next index block.  Returns 0 on success or a negative errno.
 */
static int __pmem_label_update(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
                int pos)
{
        struct nd_namespace_common *ndns = &nspm->nsio.common;
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_ent *label_ent, *victim = NULL;
        struct nd_namespace_label *nd_label;
        struct nd_namespace_index *nsindex;
        struct nd_label_id label_id;
        struct resource *res;
        unsigned long *free;
        u32 nslot, slot;
        size_t offset;
        u64 cookie;
        int rc;

        if (!preamble_next(ndd, &nsindex, &free, &nslot))
                return -ENXIO;

        cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
        nd_label_gen_id(&label_id, nspm->uuid, 0);
        /* find the dpa resource previously allocated for this namespace */
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0)
                        break;

        if (!res) {
                WARN_ON_ONCE(1);
                return -ENXIO;
        }

        /* allocate and write the label to the staging (next) index */
        slot = nd_label_alloc_slot(ndd);
        if (slot == UINT_MAX)
                return -ENXIO;
        dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

        nd_label = to_label(ndd, slot);
        memset(nd_label, 0, sizeof_namespace_label(ndd));
        memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
        if (nspm->alt_name)
                memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
        nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
        nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
        nd_label->position = __cpu_to_le16(pos);
        nd_label->isetcookie = __cpu_to_le64(cookie);
        nd_label->rawsize = __cpu_to_le64(resource_size(res));
        nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
        nd_label->dpa = __cpu_to_le64(res->start);
        nd_label->slot = __cpu_to_le32(slot);
        if (namespace_label_has(ndd, type_guid))
                guid_copy(&nd_label->type_guid, &nd_set->type_guid);
        if (namespace_label_has(ndd, abstraction_guid))
                guid_copy(&nd_label->abstraction_guid,
                                to_abstraction_guid(ndns->claim_class,
                                        &nd_label->abstraction_guid));
        if (namespace_label_has(ndd, checksum)) {
                u64 sum;

                /* checksum is calculated with the checksum field zeroed */
                nd_label->checksum = __cpu_to_le64(0);
                sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
                nd_label->checksum = __cpu_to_le64(sum);
        }
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);

        /* update label */
        offset = nd_label_offset(ndd, nd_label);
        rc = nvdimm_set_config_data(ndd, offset, nd_label,
                        sizeof_namespace_label(ndd));
        if (rc < 0)
                return rc;

        /* Garbage collect the previous label */
        mutex_lock(&nd_mapping->lock);
        list_for_each_entry(label_ent, &nd_mapping->labels, list) {
                if (!label_ent->label)
                        continue;
                if (memcmp(nspm->uuid, label_ent->label->uuid,
                                        NSLABEL_UUID_LEN) != 0)
                        continue;
                victim = label_ent;
                list_move_tail(&victim->list, &nd_mapping->labels);
                break;
        }
        if (victim) {
                dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
                slot = to_slot(ndd, victim->label);
                nd_label_free_slot(ndd, slot);
                victim->label = NULL;
        }

        /* update index */
        rc = nd_label_write_index(ndd, ndd->ns_next,
                        nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
        if (rc == 0) {
                /* record the new label in an empty tracking slot */
                list_for_each_entry(label_ent, &nd_mapping->labels, list)
                        if (!label_ent->label) {
                                label_ent->label = nd_label;
                                nd_label = NULL;
                                break;
                        }
                dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
                                "failed to track label: %d\n",
                                to_slot(ndd, nd_label));
                if (nd_label)
                        rc = -ENXIO;
        }
        mutex_unlock(&nd_mapping->lock);

        return rc;
}
729
730 static bool is_old_resource(struct resource *res, struct resource **list, int n)
731 {
732         int i;
733
734         if (res->flags & DPA_RESOURCE_ADJUSTED)
735                 return false;
736         for (i = 0; i < n; i++)
737                 if (res == list[i])
738                         return true;
739         return false;
740 }
741
742 static struct resource *to_resource(struct nvdimm_drvdata *ndd,
743                 struct nd_namespace_label *nd_label)
744 {
745         struct resource *res;
746
747         for_each_dpa_resource(ndd, res) {
748                 if (res->start != __le64_to_cpu(nd_label->dpa))
749                         continue;
750                 if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
751                         continue;
752                 return res;
753         }
754
755         return NULL;
756 }
757
758 /*
759  * 1/ Account all the labels that can be freed after this update
760  * 2/ Allocate and write the label to the staging (next) index
761  * 3/ Record the resources in the namespace device
762  */
/*
 * __blk_label_update - commit a blk namespace's dpa allocations to media
 * @nd_region: parent region of @nsblk
 * @nd_mapping: the dimm mapping that holds this namespace's labels
 * @nsblk: blk namespace whose resource list is being synchronized
 * @num_labels: number of label tracking entries reserved by init_labels()
 *
 * Write one on-dimm label per newly allocated dpa resource, mark labels
 * for resources that no longer exist as garbage, publish a new namespace
 * index, then rebuild the in-memory label tracking list.  On any failure
 * after label space has been claimed, the index bitmap and the
 * nsblk->res list are rolled back to their previous contents (see the
 * "abort" path).  Returns 0 on success or a negative errno.
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	LIST_HEAD(list);
	u32 nslot, slot;

	/* need a valid next-index block to stage updates into */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	/* snapshot current state so the abort path can restore it */
	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	/* count resources that will need a fresh label slot */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
				GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			/*
			 * NOTE(review): this reassigns the
			 * for_each_clear_bit_le() iterator; since nd_label
			 * came from to_label(ndd, slot) the round-trip
			 * should yield the same slot — confirm to_slot()
			 * is the exact inverse of to_label().
			 */
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		kfree(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */


	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	/* write a label for every resource that is not a carry-over */
	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

		/* populate the label fields before pushing them to media */
		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			/* v1.2: only the lowest-dpa label carries the count */
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		/* checksum is computed over the label with the field zeroed */
		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	/* victim_map is NULL when old_num_resources == 0; size 0 skips it */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	/* detach stale entries for this uuid; nlabel counts foreign labels */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	/* re-attach the freshly written labels to free tracking entries */
	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		/* resumes from where the previous iteration left off */
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	kfree(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}
1019
1020 static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
1021 {
1022         int i, old_num_labels = 0;
1023         struct nd_label_ent *label_ent;
1024         struct nd_namespace_index *nsindex;
1025         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1026
1027         mutex_lock(&nd_mapping->lock);
1028         list_for_each_entry(label_ent, &nd_mapping->labels, list)
1029                 old_num_labels++;
1030         mutex_unlock(&nd_mapping->lock);
1031
1032         /*
1033          * We need to preserve all the old labels for the mapping so
1034          * they can be garbage collected after writing the new labels.
1035          */
1036         for (i = old_num_labels; i < num_labels; i++) {
1037                 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
1038                 if (!label_ent)
1039                         return -ENOMEM;
1040                 mutex_lock(&nd_mapping->lock);
1041                 list_add_tail(&label_ent->list, &nd_mapping->labels);
1042                 mutex_unlock(&nd_mapping->lock);
1043         }
1044
1045         if (ndd->ns_current == -1 || ndd->ns_next == -1)
1046                 /* pass */;
1047         else
1048                 return max(num_labels, old_num_labels);
1049
1050         nsindex = to_namespace_index(ndd, 0);
1051         memset(nsindex, 0, ndd->nsarea.config_size);
1052         for (i = 0; i < 2; i++) {
1053                 int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
1054
1055                 if (rc)
1056                         return rc;
1057         }
1058         ndd->ns_next = 1;
1059         ndd->ns_current = 0;
1060
1061         return max(num_labels, old_num_labels);
1062 }
1063
1064 static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
1065 {
1066         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1067         struct nd_label_ent *label_ent, *e;
1068         struct nd_namespace_index *nsindex;
1069         u8 label_uuid[NSLABEL_UUID_LEN];
1070         unsigned long *free;
1071         LIST_HEAD(list);
1072         u32 nslot, slot;
1073         int active = 0;
1074
1075         if (!uuid)
1076                 return 0;
1077
1078         /* no index || no labels == nothing to delete */
1079         if (!preamble_next(ndd, &nsindex, &free, &nslot))
1080                 return 0;
1081
1082         mutex_lock(&nd_mapping->lock);
1083         list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1084                 struct nd_namespace_label *nd_label = label_ent->label;
1085
1086                 if (!nd_label)
1087                         continue;
1088                 active++;
1089                 memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1090                 if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
1091                         continue;
1092                 active--;
1093                 slot = to_slot(ndd, nd_label);
1094                 nd_label_free_slot(ndd, slot);
1095                 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
1096                 list_move_tail(&label_ent->list, &list);
1097                 label_ent->label = NULL;
1098         }
1099         list_splice_tail_init(&list, &nd_mapping->labels);
1100
1101         if (active == 0) {
1102                 nd_mapping_free_labels(nd_mapping);
1103                 dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
1104         }
1105         mutex_unlock(&nd_mapping->lock);
1106
1107         return nd_label_write_index(ndd, ndd->ns_next,
1108                         nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
1109 }
1110
1111 int nd_pmem_namespace_label_update(struct nd_region *nd_region,
1112                 struct nd_namespace_pmem *nspm, resource_size_t size)
1113 {
1114         int i;
1115
1116         for (i = 0; i < nd_region->ndr_mappings; i++) {
1117                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1118                 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1119                 struct resource *res;
1120                 int rc, count = 0;
1121
1122                 if (size == 0) {
1123                         rc = del_labels(nd_mapping, nspm->uuid);
1124                         if (rc)
1125                                 return rc;
1126                         continue;
1127                 }
1128
1129                 for_each_dpa_resource(ndd, res)
1130                         if (strncmp(res->name, "pmem", 4) == 0)
1131                                 count++;
1132                 WARN_ON_ONCE(!count);
1133
1134                 rc = init_labels(nd_mapping, count);
1135                 if (rc < 0)
1136                         return rc;
1137
1138                 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
1139                 if (rc)
1140                         return rc;
1141         }
1142
1143         return 0;
1144 }
1145
1146 int nd_blk_namespace_label_update(struct nd_region *nd_region,
1147                 struct nd_namespace_blk *nsblk, resource_size_t size)
1148 {
1149         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1150         struct resource *res;
1151         int count = 0;
1152
1153         if (size == 0)
1154                 return del_labels(nd_mapping, nsblk->uuid);
1155
1156         for_each_dpa_resource(to_ndd(nd_mapping), res)
1157                 count++;
1158
1159         count = init_labels(nd_mapping, count);
1160         if (count < 0)
1161                 return count;
1162
1163         return __blk_label_update(nd_region, nd_mapping, nsblk, count);
1164 }
1165
/*
 * nd_label_init - parse the well-known btt/btt2/pfn/dax GUID strings
 * into binary guid_t form for later comparisons.  The inputs are
 * compile-time constants, so a parse failure indicates a build bug —
 * hence WARN_ON rather than error propagation.  Always returns 0.
 */
int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}