nfit, address-range-scrub: introduce nfit_spa->ars_state
drivers/acpi/nfit/core.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead, Jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
                "Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
                "Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
                "Try this DSM type first when identifying NVDIMM family");
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
        struct list_head spas;
        struct list_head memdevs;
        struct list_head dcrs;
        struct list_head bdws;
        struct list_head idts;
        struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
        return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
                struct nvdimm_bus_descriptor *nd_desc)
{
        return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

        /*
         * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
         * acpi_device.
         */
        if (!nd_desc->provider_name
                        || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
                return NULL;

        return to_acpi_device(acpi_desc->dev);
}

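/*
 * Translate a raw firmware status word into an errno. By convention in
 * the code below, the low 16 bits of 'status' carry the command
 * completion status and the upper 16 bits carry command-specific
 * extended status.
 */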
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
        struct nd_cmd_clear_error *clear_err;
        struct nd_cmd_ars_status *ars_status;
        u16 flags;

        switch (cmd) {
        case ND_CMD_ARS_CAP:
                if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
                        return -ENOTTY;

                /* Command failed */
                if (status & 0xffff)
                        return -EIO;

                /* No supported scan types for this range */
                flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
                if ((status >> 16 & flags) == 0)
                        return -ENOTTY;
                return 0;
        case ND_CMD_ARS_START:
                /* ARS is in progress */
                if ((status & 0xffff) == NFIT_ARS_START_BUSY)
                        return -EBUSY;

                /* Command failed */
                if (status & 0xffff)
                        return -EIO;
                return 0;
        case ND_CMD_ARS_STATUS:
                ars_status = buf;
                /* Command failed */
                if (status & 0xffff)
                        return -EIO;
                /* Check extended status (Upper two bytes) */
                if (status == NFIT_ARS_STATUS_DONE)
                        return 0;

                /* ARS is in progress */
                if (status == NFIT_ARS_STATUS_BUSY)
                        return -EBUSY;

                /* No ARS performed for the current boot */
                if (status == NFIT_ARS_STATUS_NONE)
                        return -EAGAIN;

                /*
                 * ARS interrupted, either we overflowed or some other
                 * agent wants the scan to stop.  If we didn't overflow
                 * then just continue with the returned results.
                 */
                if (status == NFIT_ARS_STATUS_INTR) {
                        if (ars_status->out_length >= 40 && (ars_status->flags
                                                & NFIT_ARS_F_OVERFLOW))
                                return -ENOSPC;
                        return 0;
                }

                /* Unknown status */
                if (status >> 16)
                        return -EIO;
                return 0;
        case ND_CMD_CLEAR_ERROR:
                clear_err = buf;
                if (status & 0xffff)
                        return -EIO;
                if (!clear_err->cleared)
                        return -EIO;
                if (clear_err->length > clear_err->cleared)
                        return clear_err->cleared;
                return 0;
        default:
                break;
        }

        /* all other non-zero status results in an error */
        if (status)
                return -EIO;
        return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
                u32 status)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE:
                /*
                 * In the _LSI, _LSR, _LSW case the locked status is
                 * communicated via the read/write commands
                 */
                if (nfit_mem->has_lsr)
                        break;

                if (status >> 16 & ND_CONFIG_LOCKED)
                        return -EACCES;
                break;
        case ND_CMD_GET_CONFIG_DATA:
                if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
                        return -EACCES;
                break;
        case ND_CMD_SET_CONFIG_DATA:
                if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
                        return -EACCES;
                break;
        default:
                break;
        }

        /* all other non-zero status results in an error */
        if (status)
                return -EIO;
        return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
                u32 status)
{
        if (!nvdimm)
                return xlat_bus_status(buf, cmd, status);
        return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
        int i;
        void *dst;
        size_t size = 0;
        union acpi_object *buf = NULL;

        if (pkg->type != ACPI_TYPE_PACKAGE) {
                WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                                pkg->type);
                goto err;
        }

        for (i = 0; i < pkg->package.count; i++) {
                union acpi_object *obj = &pkg->package.elements[i];

                if (obj->type == ACPI_TYPE_INTEGER)
                        size += 4;
                else if (obj->type == ACPI_TYPE_BUFFER)
                        size += obj->buffer.length;
                else {
                        WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                                        obj->type);
                        goto err;
                }
        }

        buf = ACPI_ALLOCATE(sizeof(*buf) + size);
        if (!buf)
                goto err;

        dst = buf + 1;
        buf->type = ACPI_TYPE_BUFFER;
        buf->buffer.length = size;
        buf->buffer.pointer = dst;
        for (i = 0; i < pkg->package.count; i++) {
                union acpi_object *obj = &pkg->package.elements[i];

                if (obj->type == ACPI_TYPE_INTEGER) {
                        memcpy(dst, &obj->integer.value, 4);
                        dst += 4;
                } else if (obj->type == ACPI_TYPE_BUFFER) {
                        memcpy(dst, obj->buffer.pointer, obj->buffer.length);
                        dst += obj->buffer.length;
                }
        }
err:
        ACPI_FREE(pkg);
        return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
        union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
        void *dst = NULL;

        if (!buf)
                goto err;

        if (integer->type != ACPI_TYPE_INTEGER) {
                WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                                integer->type);
                goto err;
        }

        dst = buf + 1;
        buf->type = ACPI_TYPE_BUFFER;
        buf->buffer.length = 4;
        buf->buffer.pointer = dst;
        memcpy(dst, &integer->integer.value, 4);
err:
        ACPI_FREE(integer);
        return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
                u32 len, void *data)
{
        acpi_status rc;
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_object_list input = {
                .count = 3,
                .pointer = (union acpi_object []) {
                        [0] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = offset,
                        },
                        [1] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = len,
                        },
                        [2] = {
                                .buffer.type = ACPI_TYPE_BUFFER,
                                .buffer.pointer = data,
                                .buffer.length = len,
                        },
                },
        };

        rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
        if (ACPI_FAILURE(rc))
                return NULL;
        return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
                u32 len)
{
        acpi_status rc;
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_object_list input = {
                .count = 2,
                .pointer = (union acpi_object []) {
                        [0] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = offset,
                        },
                        [1] = {
                                .integer.type = ACPI_TYPE_INTEGER,
                                .integer.value = len,
                        },
                },
        };

        rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
        if (ACPI_FAILURE(rc))
                return NULL;
        return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
        acpi_status rc;
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

        rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
        if (ACPI_FAILURE(rc))
                return NULL;
        return pkg_to_buf(buf.pointer);
}
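
/*
 * _LSI/_LSR/_LSW are the ACPI 6.2 label-area methods. The wrappers above
 * normalize their returns to the flat buffer object acpi_nfit_ctl()
 * expects: e.g. a _LSR return package of { Status (Integer), Label Data
 * (Buffer) } is flattened by pkg_to_buf() into a 4-byte status word
 * followed by the raw label data.
 */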

static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
        static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
                [NVDIMM_FAMILY_INTEL] = {
                        [NVDIMM_INTEL_GET_MODES] = 2,
                        [NVDIMM_INTEL_GET_FWINFO] = 2,
                        [NVDIMM_INTEL_START_FWUPDATE] = 2,
                        [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
                        [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
                        [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
                        [NVDIMM_INTEL_SET_THRESHOLD] = 2,
                        [NVDIMM_INTEL_INJECT_ERROR] = 2,
                },
        };
        u8 id;

        if (family > NVDIMM_FAMILY_MAX)
                return 0;
        if (func > 31)
                return 0;
        id = revid_table[family][func];
        if (id == 0)
                return 1; /* default */
        return id;
}
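
/*
 * Example: nfit_dsm_revid(NVDIMM_FAMILY_INTEL, NVDIMM_INTEL_GET_MODES)
 * yields revision 2, while any family/function pair not listed in the
 * table above falls back to revision 1.
 */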

int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        union acpi_object in_obj, in_buf, *out_obj;
        const struct nd_cmd_desc *desc = NULL;
        struct device *dev = acpi_desc->dev;
        struct nd_cmd_pkg *call_pkg = NULL;
        const char *cmd_name, *dimm_name;
        unsigned long cmd_mask, dsm_mask;
        u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
        const guid_t *guid;
        int rc, i;

        func = cmd;
        if (cmd == ND_CMD_CALL) {
                call_pkg = buf;
                func = call_pkg->nd_command;

                for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
                        if (call_pkg->nd_reserved2[i])
                                return -EINVAL;
        }

        if (nvdimm) {
                struct acpi_device *adev = nfit_mem->adev;

                if (!adev)
                        return -ENOTTY;
                if (call_pkg && nfit_mem->family != call_pkg->nd_family)
                        return -ENOTTY;

                dimm_name = nvdimm_name(nvdimm);
                cmd_name = nvdimm_cmd_name(cmd);
                cmd_mask = nvdimm_cmd_mask(nvdimm);
                dsm_mask = nfit_mem->dsm_mask;
                desc = nd_cmd_dimm_desc(cmd);
                guid = to_nfit_uuid(nfit_mem->family);
                handle = adev->handle;
        } else {
                struct acpi_device *adev = to_acpi_dev(acpi_desc);

                cmd_name = nvdimm_bus_cmd_name(cmd);
                cmd_mask = nd_desc->cmd_mask;
                dsm_mask = cmd_mask;
                if (cmd == ND_CMD_CALL)
                        dsm_mask = nd_desc->bus_dsm_mask;
                desc = nd_cmd_bus_desc(cmd);
                guid = to_nfit_uuid(NFIT_DEV_BUS);
                handle = adev->handle;
                dimm_name = "bus";
        }

        if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
                return -ENOTTY;

        if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
                return -ENOTTY;

        in_obj.type = ACPI_TYPE_PACKAGE;
        in_obj.package.count = 1;
        in_obj.package.elements = &in_buf;
        in_buf.type = ACPI_TYPE_BUFFER;
        in_buf.buffer.pointer = buf;
        in_buf.buffer.length = 0;

        /* libnvdimm has already validated the input envelope */
        for (i = 0; i < desc->in_num; i++)
                in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
                                i, buf);

        if (call_pkg) {
                /* skip over package wrapper */
                in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
                in_buf.buffer.length = call_pkg->nd_size_in;
        }

        dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
                dimm_name, cmd, func, in_buf.buffer.length);
        print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
                        in_buf.buffer.pointer,
                        min_t(u32, 256, in_buf.buffer.length), true);

        /* call the BIOS, prefer the named methods over _DSM if available */
        if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
                out_obj = acpi_label_info(handle);
        else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
                struct nd_cmd_get_config_data_hdr *p = buf;

                out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
        } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
                        && nfit_mem->has_lsw) {
                struct nd_cmd_set_config_hdr *p = buf;

                out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
                                p->in_buf);
        } else {
                u8 revid;

                if (nvdimm)
                        revid = nfit_dsm_revid(nfit_mem->family, func);
                else
                        revid = 1;
                out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
        }

        if (!out_obj) {
                dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
                return -EINVAL;
        }

        if (call_pkg) {
                call_pkg->nd_fw_size = out_obj->buffer.length;
                memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
                        out_obj->buffer.pointer,
                        min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

                ACPI_FREE(out_obj);
                /*
                 * Need to support FW functions without a known output
                 * size in advance. The caller can determine the required
                 * size based upon nd_fw_size. If we returned an error
                 * (like elsewhere) the caller couldn't rely upon the
                 * returned data to make that calculation.
                 */
                return 0;
        }

        if (out_obj->type != ACPI_TYPE_BUFFER) {
                dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
                                dimm_name, cmd_name, out_obj->type);
                rc = -EINVAL;
                goto out;
        }

        dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
                        cmd_name, out_obj->buffer.length);
        print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
                        out_obj->buffer.pointer,
                        min_t(u32, 128, out_obj->buffer.length), true);

        for (i = 0, offset = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
                                (u32 *) out_obj->buffer.pointer,
                                out_obj->buffer.length - offset);

                if (offset + out_size > out_obj->buffer.length) {
                        dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
                                        dimm_name, cmd_name, i);
                        break;
                }

                if (in_buf.buffer.length + offset + out_size > buf_len) {
                        dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
                                        dimm_name, cmd_name, i);
                        rc = -ENXIO;
                        goto out;
                }
                memcpy(buf + in_buf.buffer.length + offset,
                                out_obj->buffer.pointer + offset, out_size);
                offset += out_size;
        }

        /*
         * Set fw_status for all the commands with a known format to be
         * later interpreted by xlat_status().
         */
        if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
                                        && cmd <= ND_CMD_CLEAR_ERROR)
                                || (nvdimm && cmd >= ND_CMD_SMART
                                        && cmd <= ND_CMD_VENDOR)))
                fw_status = *(u32 *) out_obj->buffer.pointer;

        if (offset + in_buf.buffer.length < buf_len) {
                if (i >= 1) {
                        /*
                         * status valid, return the number of bytes left
                         * unfilled in the output buffer
                         */
                        rc = buf_len - offset - in_buf.buffer.length;
                        if (cmd_rc)
                                *cmd_rc = xlat_status(nvdimm, buf, cmd,
                                                fw_status);
                } else {
                        dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                                        __func__, dimm_name, cmd_name, buf_len,
                                        offset);
                        rc = -ENXIO;
                }
        } else {
                rc = 0;
                if (cmd_rc)
                        *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
        }

 out:
        ACPI_FREE(out_obj);

        return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
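
/*
 * Minimal call sketch (hypothetical caller, locking and error handling
 * elided). A NULL nvdimm addresses the bus itself, e.g. to query ARS
 * capabilities for a system physical address range:
 *
 *	struct nd_cmd_ars_cap ars_cap = {
 *		.address = spa->address,
 *		.length = spa->length,
 *	};
 *	int cmd_rc, rc;
 *
 *	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
 *			&ars_cap, sizeof(ars_cap), &cmd_rc);
 *
 * 'rc' reports envelope / transport errors while 'cmd_rc' carries the
 * xlat_status() translation of the firmware status word.
 */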

static const char *spa_type_name(u16 type)
{
        static const char *to_name[] = {
                [NFIT_SPA_VOLATILE] = "volatile",
                [NFIT_SPA_PM] = "pmem",
                [NFIT_SPA_DCR] = "dimm-control-region",
                [NFIT_SPA_BDW] = "block-data-window",
                [NFIT_SPA_VDISK] = "volatile-disk",
                [NFIT_SPA_VCD] = "volatile-cd",
                [NFIT_SPA_PDISK] = "persistent-disk",
                [NFIT_SPA_PCD] = "persistent-cd",
        };

        if (type > NFIT_SPA_PCD)
                return "unknown";

        return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
        int i;

        for (i = 0; i < NFIT_UUID_MAX; i++)
                if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
                        return i;
        return -1;
}

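/*
 * The add_* helpers below share a pattern: if a byte-identical table
 * was seen in the previous NFIT enumeration, the existing entry is
 * moved over so that re-evaluation (e.g. after an ACPI notification)
 * does not duplicate state; otherwise a new entry is allocated.
 */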
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_system_address *spa)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_spa *nfit_spa;

        if (spa->header.length != sizeof(*spa))
                return false;

        list_for_each_entry(nfit_spa, &prev->spas, list) {
                if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
                        list_move_tail(&nfit_spa->list, &acpi_desc->spas);
                        return true;
                }
        }

        nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
                        GFP_KERNEL);
        if (!nfit_spa)
                return false;
        INIT_LIST_HEAD(&nfit_spa->list);
        memcpy(nfit_spa->spa, spa, sizeof(*spa));
        list_add_tail(&nfit_spa->list, &acpi_desc->spas);
        dev_dbg(dev, "spa index: %d type: %s\n",
                        spa->range_index,
                        spa_type_name(nfit_spa_type(spa)));
        return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_memory_map *memdev)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_memdev *nfit_memdev;

        if (memdev->header.length != sizeof(*memdev))
                return false;

        list_for_each_entry(nfit_memdev, &prev->memdevs, list)
                if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
                        list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
                        return true;
                }

        nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
                        GFP_KERNEL);
        if (!nfit_memdev)
                return false;
        INIT_LIST_HEAD(&nfit_memdev->list);
        memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
        list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
        dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
                        memdev->device_handle, memdev->range_index,
                        memdev->region_index, memdev->flags);
        return true;
}

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
        if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
                                window_size))
                return 0;
        if (dcr->windows)
                return sizeof(*dcr);
        return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_control_region *dcr)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_dcr *nfit_dcr;

        if (!sizeof_dcr(dcr))
                return false;

        list_for_each_entry(nfit_dcr, &prev->dcrs, list)
                if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
                        list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
                        return true;
                }

        nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
                        GFP_KERNEL);
        if (!nfit_dcr)
                return false;
        INIT_LIST_HEAD(&nfit_dcr->list);
        memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
        list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
        dev_dbg(dev, "dcr index: %d windows: %d\n",
                        dcr->region_index, dcr->windows);
        return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_data_region *bdw)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_bdw *nfit_bdw;

        if (bdw->header.length != sizeof(*bdw))
                return false;
        list_for_each_entry(nfit_bdw, &prev->bdws, list)
                if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
                        list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
                        return true;
                }

        nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
                        GFP_KERNEL);
        if (!nfit_bdw)
                return false;
        INIT_LIST_HEAD(&nfit_bdw->list);
        memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
        list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
        dev_dbg(dev, "bdw dcr: %d windows: %d\n",
                        bdw->region_index, bdw->windows);
        return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
        if (idt->header.length < sizeof(*idt))
                return 0;
        return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_interleave *idt)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_idt *nfit_idt;

        if (!sizeof_idt(idt))
                return false;

        list_for_each_entry(nfit_idt, &prev->idts, list) {
                if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
                        continue;

                if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
                        list_move_tail(&nfit_idt->list, &acpi_desc->idts);
                        return true;
                }
        }

        nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
                        GFP_KERNEL);
        if (!nfit_idt)
                return false;
        INIT_LIST_HEAD(&nfit_idt->list);
        memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
        list_add_tail(&nfit_idt->list, &acpi_desc->idts);
        dev_dbg(dev, "idt index: %d num_lines: %d\n",
                        idt->interleave_index, idt->line_count);
        return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
        if (flush->header.length < sizeof(*flush))
                return 0;
        return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}
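
/*
 * sizeof_idt() and sizeof_flush() above account for the trailing
 * flexible arrays (line_offset[] and hint_address[]) that ACPICA
 * declares with a single element, hence the 'count - 1' adjustment.
 */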

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_flush_address *flush)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_flush *nfit_flush;

        if (!sizeof_flush(flush))
                return false;

        list_for_each_entry(nfit_flush, &prev->flushes, list) {
                if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
                        continue;

                if (memcmp(nfit_flush->flush, flush,
                                        sizeof_flush(flush)) == 0) {
                        list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
                        return true;
                }
        }

        nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
                        + sizeof_flush(flush), GFP_KERNEL);
        if (!nfit_flush)
                return false;
        INIT_LIST_HEAD(&nfit_flush->list);
        memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
        list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
        dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
                        flush->device_handle, flush->hint_count);
        return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_capabilities *pcap)
{
        struct device *dev = acpi_desc->dev;
        u32 mask;

        mask = (1 << (pcap->highest_capability + 1)) - 1;
        acpi_desc->platform_cap = pcap->capabilities & mask;
        dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
        return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev, void *table, const void *end)
{
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_header *hdr;
        void *err = ERR_PTR(-ENOMEM);

        if (table >= end)
                return NULL;

        hdr = table;
        if (!hdr->length) {
                dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
                        hdr->type);
                return NULL;
        }

        switch (hdr->type) {
        case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
                if (!add_spa(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_MEMORY_MAP:
                if (!add_memdev(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_CONTROL_REGION:
                if (!add_dcr(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_DATA_REGION:
                if (!add_bdw(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_INTERLEAVE:
                if (!add_idt(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
                if (!add_flush(acpi_desc, prev, table))
                        return err;
                break;
        case ACPI_NFIT_TYPE_SMBIOS:
                dev_dbg(dev, "smbios\n");
                break;
        case ACPI_NFIT_TYPE_CAPABILITIES:
                if (!add_platform_cap(acpi_desc, table))
                        return err;
                break;
        default:
                dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
                break;
        }

        return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem)
{
        u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
        u16 dcr = nfit_mem->dcr->region_index;
        struct nfit_spa *nfit_spa;

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                u16 range_index = nfit_spa->spa->range_index;
                int type = nfit_spa_type(nfit_spa->spa);
                struct nfit_memdev *nfit_memdev;

                if (type != NFIT_SPA_BDW)
                        continue;

                list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                        if (nfit_memdev->memdev->range_index != range_index)
                                continue;
                        if (nfit_memdev->memdev->device_handle != device_handle)
                                continue;
                        if (nfit_memdev->memdev->region_index != dcr)
                                continue;

                        nfit_mem->spa_bdw = nfit_spa->spa;
                        return;
                }
        }

        dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
                        nfit_mem->spa_dcr->range_index);
        nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
        u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
        struct nfit_memdev *nfit_memdev;
        struct nfit_bdw *nfit_bdw;
        struct nfit_idt *nfit_idt;
        u16 idt_idx, range_index;

        list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
                if (nfit_bdw->bdw->region_index != dcr)
                        continue;
                nfit_mem->bdw = nfit_bdw->bdw;
                break;
        }

        if (!nfit_mem->bdw)
                return;

        nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

        if (!nfit_mem->spa_bdw)
                return;

        range_index = nfit_mem->spa_bdw->range_index;
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                if (nfit_memdev->memdev->range_index != range_index ||
                                nfit_memdev->memdev->region_index != dcr)
                        continue;
                nfit_mem->memdev_bdw = nfit_memdev->memdev;
                idt_idx = nfit_memdev->memdev->interleave_index;
                list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
                        if (nfit_idt->idt->interleave_index != idt_idx)
                                continue;
                        nfit_mem->idt_bdw = nfit_idt->idt;
                        break;
                }
                break;
        }
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_system_address *spa)
{
        struct nfit_mem *nfit_mem, *found;
        struct nfit_memdev *nfit_memdev;
        int type = spa ? nfit_spa_type(spa) : 0;

        switch (type) {
        case NFIT_SPA_DCR:
        case NFIT_SPA_PM:
                break;
        default:
                if (spa)
                        return 0;
        }

        /*
         * This loop runs in two modes: when a dimm is mapped, the loop
         * adds memdev associations to an existing dimm, or creates a
         * dimm. In the unmapped-dimm case, this loop sweeps for memdev
         * instances with an invalid / zero range_index and adds those
         * dimms without spa associations.
         */
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                struct nfit_flush *nfit_flush;
                struct nfit_dcr *nfit_dcr;
                u32 device_handle;
                u16 dcr;

                if (spa && nfit_memdev->memdev->range_index != spa->range_index)
                        continue;
                if (!spa && nfit_memdev->memdev->range_index)
                        continue;
                found = NULL;
                dcr = nfit_memdev->memdev->region_index;
                device_handle = nfit_memdev->memdev->device_handle;
                list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
                        if (__to_nfit_memdev(nfit_mem)->device_handle
                                        == device_handle) {
                                found = nfit_mem;
                                break;
                        }

                if (found)
                        nfit_mem = found;
                else {
                        nfit_mem = devm_kzalloc(acpi_desc->dev,
                                        sizeof(*nfit_mem), GFP_KERNEL);
                        if (!nfit_mem)
                                return -ENOMEM;
                        INIT_LIST_HEAD(&nfit_mem->list);
                        nfit_mem->acpi_desc = acpi_desc;
                        list_add(&nfit_mem->list, &acpi_desc->dimms);
                }

                list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
                        if (nfit_dcr->dcr->region_index != dcr)
                                continue;
                        /*
                         * Record the control region for the dimm.  For
                         * the ACPI 6.1 case, where there are separate
                         * control regions for the pmem vs blk
                         * interfaces, be sure to record the extended
                         * blk details.
                         */
                        if (!nfit_mem->dcr)
                                nfit_mem->dcr = nfit_dcr->dcr;
                        else if (nfit_mem->dcr->windows == 0
                                        && nfit_dcr->dcr->windows)
                                nfit_mem->dcr = nfit_dcr->dcr;
                        break;
                }

                list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
                        struct acpi_nfit_flush_address *flush;
                        u16 i;

                        if (nfit_flush->flush->device_handle != device_handle)
                                continue;
                        nfit_mem->nfit_flush = nfit_flush;
                        flush = nfit_flush->flush;
                        nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
                                        flush->hint_count
                                        * sizeof(struct resource), GFP_KERNEL);
                        if (!nfit_mem->flush_wpq)
                                return -ENOMEM;
                        for (i = 0; i < flush->hint_count; i++) {
                                struct resource *res = &nfit_mem->flush_wpq[i];

                                res->start = flush->hint_address[i];
                                res->end = res->start + 8 - 1;
                        }
                        break;
                }

                if (dcr && !nfit_mem->dcr) {
                        dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
                                        spa->range_index, dcr);
                        return -ENODEV;
                }

                if (type == NFIT_SPA_DCR) {
                        struct nfit_idt *nfit_idt;
                        u16 idt_idx;

                        /* multiple dimms may share a SPA when interleaved */
                        nfit_mem->spa_dcr = spa;
                        nfit_mem->memdev_dcr = nfit_memdev->memdev;
                        idt_idx = nfit_memdev->memdev->interleave_index;
                        list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
                                if (nfit_idt->idt->interleave_index != idt_idx)
                                        continue;
                                nfit_mem->idt_dcr = nfit_idt->idt;
                                break;
                        }
                        nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
                } else if (type == NFIT_SPA_PM) {
                        /*
                         * A single dimm may belong to multiple SPA-PM
                         * ranges, record at least one in addition to
                         * any SPA-DCR range.
                         */
                        nfit_mem->memdev_pmem = nfit_memdev->memdev;
                } else
                        nfit_mem->memdev_dcr = nfit_memdev->memdev;
        }

        return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
        struct nfit_mem *a = container_of(_a, typeof(*a), list);
        struct nfit_mem *b = container_of(_b, typeof(*b), list);
        u32 handleA, handleB;

        handleA = __to_nfit_memdev(a)->device_handle;
        handleB = __to_nfit_memdev(b)->device_handle;
        if (handleA < handleB)
                return -1;
        else if (handleA > handleB)
                return 1;
        return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
        struct nfit_spa *nfit_spa;
        int rc;

        /*
         * For each SPA-DCR or SPA-PMEM address range find its
         * corresponding MEMDEV(s).  From each MEMDEV find the
         * corresponding DCR.  Then, if we're operating on a SPA-DCR,
         * try to find a SPA-BDW and a corresponding BDW that references
         * the DCR.  Throw it all into an nfit_mem object.  Note, that
         * BDWs are optional.
         */
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
                if (rc)
                        return rc;
        }

        /*
         * If a DIMM has failed to be mapped into SPA there will be no
         * SPA entries above. Find and register all the unmapped DIMMs
         * for reporting and recovery purposes.
         */
        rc = __nfit_mem_init(acpi_desc, NULL);
        if (rc)
                return rc;

        list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

        return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

        return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
                __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
{
        struct nvdimm_bus_descriptor *nd_desc;
        ssize_t rc;
        long val;

        rc = kstrtol(buf, 0, &val);
        if (rc)
                return rc;

        device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

                switch (val) {
                case HW_ERROR_SCRUB_ON:
                        acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
                        break;
                case HW_ERROR_SCRUB_OFF:
                        acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }
        }
        device_unlock(dev);
        if (rc)
                return rc;
        return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);
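
/*
 * Usage sketch (the sysfs path varies by platform / bus instance):
 *
 *   echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub
 *
 * enables a full ARS on every memory-error exception; echoing 0
 * restores the default insert-only behavior.
 */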

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress.
 */
static ssize_t scrub_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus_descriptor *nd_desc;
        ssize_t rc = -ENXIO;

        device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

                mutex_lock(&acpi_desc->init_mutex);
                rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
                                work_busy(&acpi_desc->work)
                                && !acpi_desc->cancel ? "+\n" : "\n");
                mutex_unlock(&acpi_desc->init_mutex);
        }
        device_unlock(dev);
        return rc;
}

static ssize_t scrub_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
{
        struct nvdimm_bus_descriptor *nd_desc;
        ssize_t rc;
        long val;

        rc = kstrtol(buf, 0, &val);
        if (rc)
                return rc;
        if (val != 1)
                return -EINVAL;

        device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

                rc = acpi_nfit_ars_rescan(acpi_desc, 0);
        }
        device_unlock(dev);
        if (rc)
                return rc;
        return size;
}
static DEVICE_ATTR_RW(scrub);
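
/*
 * Example interaction (hypothetical path and values):
 *
 *   $ cat /sys/bus/nd/devices/ndbus0/nfit/scrub
 *   2+                      <- two scrubs completed, one in flight
 *   $ echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
 *
 * Writing '1' requests a new rescan via acpi_nfit_ars_rescan().
 */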

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
                | 1 << ND_CMD_ARS_STATUS;

        return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

        if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
                return 0;
        return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
        &dev_attr_revision.attr,
        &dev_attr_scrub.attr,
        &dev_attr_hw_error_scrub.attr,
        &dev_attr_bus_dsm_mask.attr,
        NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
        .name = "nfit",
        .attrs = acpi_nfit_attributes,
        .is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
        &nvdimm_bus_attribute_group,
        &acpi_nfit_attribute_group,
        NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

        return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

        return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

        return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

        return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n",
                        be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        int formats = 0;

        if (nfit_mem->memdev_pmem)
                formats++;
        if (nfit_mem->memdev_bdw)
                formats++;
        return formats;
}

static ssize_t format_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 handle;
        ssize_t rc = -ENXIO;
        struct nfit_mem *nfit_mem;
        struct nfit_memdev *nfit_memdev;
        struct acpi_nfit_desc *acpi_desc;
        struct nvdimm *nvdimm = to_nvdimm(dev);
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

        nfit_mem = nvdimm_provider_data(nvdimm);
        acpi_desc = nfit_mem->acpi_desc;
        handle = to_nfit_memdev(dev)->device_handle;

        /* assumes DIMMs have at most 2 published interface codes */
        mutex_lock(&acpi_desc->init_mutex);
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
                struct nfit_dcr *nfit_dcr;

                if (memdev->device_handle != handle)
                        continue;

                list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
                        if (nfit_dcr->dcr->region_index != memdev->region_index)
                                continue;
                        if (nfit_dcr->dcr->code == dcr->code)
                                continue;
                        rc = sprintf(buf, "0x%04x\n",
                                        le16_to_cpu(nfit_dcr->dcr->code));
                        break;
                }
1471                 if (rc != -ENXIO)
1472                         break;
1473         }
1474         mutex_unlock(&acpi_desc->init_mutex);
1475         return rc;
1476 }
1477 static DEVICE_ATTR_RO(format1);
1478
1479 static ssize_t formats_show(struct device *dev,
1480                 struct device_attribute *attr, char *buf)
1481 {
1482         struct nvdimm *nvdimm = to_nvdimm(dev);
1483
1484         return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1485 }
1486 static DEVICE_ATTR_RO(formats);
1487
1488 static ssize_t serial_show(struct device *dev,
1489                 struct device_attribute *attr, char *buf)
1490 {
1491         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1492
1493         return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
1494 }
1495 static DEVICE_ATTR_RO(serial);
1496
1497 static ssize_t family_show(struct device *dev,
1498                 struct device_attribute *attr, char *buf)
1499 {
1500         struct nvdimm *nvdimm = to_nvdimm(dev);
1501         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1502
1503         if (nfit_mem->family < 0)
1504                 return -ENXIO;
1505         return sprintf(buf, "%d\n", nfit_mem->family);
1506 }
1507 static DEVICE_ATTR_RO(family);
1508
1509 static ssize_t dsm_mask_show(struct device *dev,
1510                 struct device_attribute *attr, char *buf)
1511 {
1512         struct nvdimm *nvdimm = to_nvdimm(dev);
1513         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1514
1515         if (nfit_mem->family < 0)
1516                 return -ENXIO;
1517         return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1518 }
1519 static DEVICE_ATTR_RO(dsm_mask);
1520
1521 static ssize_t flags_show(struct device *dev,
1522                 struct device_attribute *attr, char *buf)
1523 {
1524         u16 flags = to_nfit_memdev(dev)->flags;
1525
1526         return sprintf(buf, "%s%s%s%s%s%s%s\n",
1527                 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
1528                 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
1529                 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
1530                 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
1531                 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
1532                 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
1533                 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
1534 }
1535 static DEVICE_ATTR_RO(flags);
1536
1537 static ssize_t id_show(struct device *dev,
1538                 struct device_attribute *attr, char *buf)
1539 {
1540         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1541
1542         if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
1543                 return sprintf(buf, "%04x-%02x-%04x-%08x\n",
1544                                 be16_to_cpu(dcr->vendor_id),
1545                                 dcr->manufacturing_location,
1546                                 be16_to_cpu(dcr->manufacturing_date),
1547                                 be32_to_cpu(dcr->serial_number));
1548         else
1549                 return sprintf(buf, "%04x-%08x\n",
1550                                 be16_to_cpu(dcr->vendor_id),
1551                                 be32_to_cpu(dcr->serial_number));
1552 }
1553 static DEVICE_ATTR_RO(id);
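/*
 * With manufacturing info valid this reads back as, for example,
 * "8089-a2-1540-00abcdef" (vendor-location-date-serial); without it the
 * id falls back to "8089-00abcdef".  The values are illustrative.
 */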
1554
1555 static struct attribute *acpi_nfit_dimm_attributes[] = {
1556         &dev_attr_handle.attr,
1557         &dev_attr_phys_id.attr,
1558         &dev_attr_vendor.attr,
1559         &dev_attr_device.attr,
1560         &dev_attr_rev_id.attr,
1561         &dev_attr_subsystem_vendor.attr,
1562         &dev_attr_subsystem_device.attr,
1563         &dev_attr_subsystem_rev_id.attr,
1564         &dev_attr_format.attr,
1565         &dev_attr_formats.attr,
1566         &dev_attr_format1.attr,
1567         &dev_attr_serial.attr,
1568         &dev_attr_flags.attr,
1569         &dev_attr_id.attr,
1570         &dev_attr_family.attr,
1571         &dev_attr_dsm_mask.attr,
1572         NULL,
1573 };
1574
1575 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1576                 struct attribute *a, int n)
1577 {
1578         struct device *dev = container_of(kobj, struct device, kobj);
1579         struct nvdimm *nvdimm = to_nvdimm(dev);
1580
1581         if (!to_nfit_dcr(dev)) {
1582                 /* Without a dcr only the memdev attributes can be surfaced */
1583                 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
1584                                 || a == &dev_attr_flags.attr
1585                                 || a == &dev_attr_family.attr
1586                                 || a == &dev_attr_dsm_mask.attr)
1587                         return a->mode;
1588                 return 0;
1589         }
1590
1591         if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
1592                 return 0;
1593         return a->mode;
1594 }
1595
1596 static const struct attribute_group acpi_nfit_dimm_attribute_group = {
1597         .name = "nfit",
1598         .attrs = acpi_nfit_dimm_attributes,
1599         .is_visible = acpi_nfit_dimm_attr_visible,
1600 };
1601
1602 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
1603         &nvdimm_attribute_group,
1604         &nd_device_attribute_group,
1605         &acpi_nfit_dimm_attribute_group,
1606         NULL,
1607 };
1608
1609 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
1610                 u32 device_handle)
1611 {
1612         struct nfit_mem *nfit_mem;
1613
1614         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1615                 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
1616                         return nfit_mem->nvdimm;
1617
1618         return NULL;
1619 }
1620
1621 void __acpi_nvdimm_notify(struct device *dev, u32 event)
1622 {
1623         struct nfit_mem *nfit_mem;
1624         struct acpi_nfit_desc *acpi_desc;
1625
1626         dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
1627                         event);
1628
1629         if (event != NFIT_NOTIFY_DIMM_HEALTH) {
1630                 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
1631                                 event);
1632                 return;
1633         }
1634
1635         acpi_desc = dev_get_drvdata(dev->parent);
1636         if (!acpi_desc)
1637                 return;
1638
1639         /*
1640          * If we successfully retrieved acpi_desc, then we know nfit_mem data
1641          * is still valid.
1642          */
1643         nfit_mem = dev_get_drvdata(dev);
1644         if (nfit_mem && nfit_mem->flags_attr)
1645                 sysfs_notify_dirent(nfit_mem->flags_attr);
1646 }
1647 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
1648
1649 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1650 {
1651         struct acpi_device *adev = data;
1652         struct device *dev = &adev->dev;
1653
1654         device_lock(dev->parent);
1655         __acpi_nvdimm_notify(dev, event);
1656         device_unlock(dev->parent);
1657 }
1658
1659 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1660 {
1661         acpi_handle handle;
1662         acpi_status status;
1663
1664         status = acpi_get_handle(adev->handle, method, &handle);
1665
1666         if (ACPI_SUCCESS(status))
1667                 return true;
1668         return false;
1669 }
1670
1671 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1672                 struct nfit_mem *nfit_mem, u32 device_handle)
1673 {
1674         struct acpi_device *adev, *adev_dimm;
1675         struct device *dev = acpi_desc->dev;
1676         unsigned long dsm_mask;
1677         const guid_t *guid;
1678         int i;
1679         int family = -1;
1680
1681         /* nfit test assumes 1:1 relationship between commands and dsms */
1682         nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
1683         nfit_mem->family = NVDIMM_FAMILY_INTEL;
1684         adev = to_acpi_dev(acpi_desc);
1685         if (!adev)
1686                 return 0;
1687
1688         adev_dimm = acpi_find_child_device(adev, device_handle, false);
1689         nfit_mem->adev = adev_dimm;
1690         if (!adev_dimm) {
1691                 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1692                                 device_handle);
1693                 return force_enable_dimms ? 0 : -ENODEV;
1694         }
1695
1696         if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
1697                 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
1698                 dev_err(dev, "%s: notification registration failed\n",
1699                                 dev_name(&adev_dimm->dev));
1700                 return -ENXIO;
1701         }
1702         /*
1703          * Record nfit_mem for the notification path to track back to
1704          * the nfit sysfs attributes for this dimm device object.
1705          */
1706         dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1707
1708         /*
1709          * Until standardization materializes, we need to consider four
1710          * different command sets.  Note that checking for function0 (bit0)
1711          * tells us if any commands are reachable through this GUID.
1712          */
1713         for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1714                 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
1715                         if (family < 0 || i == default_dsm_family)
1716                                 family = i;
1717
1718         /* limit the supported commands to those that are publicly documented */
1719         nfit_mem->family = family;
1720         if (override_dsm_mask && !disable_vendor_specific)
1721                 dsm_mask = override_dsm_mask;
1722         else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1723                 dsm_mask = NVDIMM_INTEL_CMDMASK;
1724                 if (disable_vendor_specific)
1725                         dsm_mask &= ~(1 << ND_CMD_VENDOR);
1726         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1727                 dsm_mask = 0x1c3c76;
1728         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1729                 dsm_mask = 0x1fe;
1730                 if (disable_vendor_specific)
1731                         dsm_mask &= ~(1 << 8);
1732         } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1733                 dsm_mask = 0xffffffff;
1734         } else {
1735                 dev_dbg(dev, "unknown dimm command family\n");
1736                 nfit_mem->family = -1;
1737                 /* DSMs are optional, continue loading the driver... */
1738                 return 0;
1739         }
1740
1741         guid = to_nfit_uuid(nfit_mem->family);
1742         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1743                 if (acpi_check_dsm(adev_dimm->handle, guid,
1744                                         nfit_dsm_revid(nfit_mem->family, i),
1745                                         1ULL << i))
1746                         set_bit(i, &nfit_mem->dsm_mask);
1747
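        /*
         * ACPI 6.2 standardized label-area access as _LSI (Label Storage
         * Information), _LSR (Label Storage Read), and _LSW (Label
         * Storage Write); when the platform implements them the driver
         * routes label commands through these methods rather than the
         * family-specific DSMs.
         */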
1748         if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1749                         && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1750                 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1751                 nfit_mem->has_lsr = true;
1752         }
1753
1754         if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1755                 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1756                 nfit_mem->has_lsw = true;
1757         }
1758
1759         return 0;
1760 }
1761
1762 static void shutdown_dimm_notify(void *data)
1763 {
1764         struct acpi_nfit_desc *acpi_desc = data;
1765         struct nfit_mem *nfit_mem;
1766
1767         mutex_lock(&acpi_desc->init_mutex);
1768         /*
1769          * Clear out the nfit_mem->flags_attr and shut down dimm event
1770          * notifications.
1771          */
1772         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1773                 struct acpi_device *adev_dimm = nfit_mem->adev;
1774
1775                 if (nfit_mem->flags_attr) {
1776                         sysfs_put(nfit_mem->flags_attr);
1777                         nfit_mem->flags_attr = NULL;
1778                 }
1779                 if (adev_dimm) {
1780                         acpi_remove_notify_handler(adev_dimm->handle,
1781                                         ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
1782                         dev_set_drvdata(&adev_dimm->dev, NULL);
1783                 }
1784         }
1785         mutex_unlock(&acpi_desc->init_mutex);
1786 }
1787
1788 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1789 {
1790         struct nfit_mem *nfit_mem;
1791         int dimm_count = 0, rc;
1792         struct nvdimm *nvdimm;
1793
1794         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1795                 struct acpi_nfit_flush_address *flush;
1796                 unsigned long flags = 0, cmd_mask;
1797                 struct nfit_memdev *nfit_memdev;
1798                 u32 device_handle;
1799                 u16 mem_flags;
1800
1801                 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1802                 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
1803                 if (nvdimm) {
1804                         dimm_count++;
1805                         continue;
1806                 }
1807
1808                 if (nfit_mem->bdw && nfit_mem->memdev_pmem)
1809                         set_bit(NDD_ALIASING, &flags);
1810
1811                 /* collate flags across all memdevs for this dimm */
1812                 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1813                         struct acpi_nfit_memory_map *dimm_memdev;
1814
1815                         dimm_memdev = __to_nfit_memdev(nfit_mem);
1816                         if (dimm_memdev->device_handle
1817                                         != nfit_memdev->memdev->device_handle)
1818                                 continue;
1819                         dimm_memdev->flags |= nfit_memdev->memdev->flags;
1820                 }
1821
1822                 mem_flags = __to_nfit_memdev(nfit_mem)->flags;
1823                 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
1824                         set_bit(NDD_UNARMED, &flags);
1825
1826                 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
1827                 if (rc)
1828                         continue;
1829
1830                 /*
1831                  * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1832                  * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1833                  * userspace interface.
1834                  */
1835                 cmd_mask = 1UL << ND_CMD_CALL;
1836                 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1837                         /*
1838                          * These commands have a 1:1 correspondence
1839                          * between DSM payload and libnvdimm ioctl
1840                          * payload format.
1841                          */
1842                         cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
1843                 }
1844
1845                 if (nfit_mem->has_lsr) {
1846                         set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
1847                         set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
1848                 }
1849                 if (nfit_mem->has_lsw)
1850                         set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
1851
1852                 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
1853                         : NULL;
1854                 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
1855                                 acpi_nfit_dimm_attribute_groups,
1856                                 flags, cmd_mask, flush ? flush->hint_count : 0,
1857                                 nfit_mem->flush_wpq);
1858                 if (!nvdimm)
1859                         return -ENOMEM;
1860
1861                 nfit_mem->nvdimm = nvdimm;
1862                 dimm_count++;
1863
1864                 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
1865                         continue;
1866
1867                 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
1868                                 nvdimm_name(nvdimm),
1869                   mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
1870                   mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
1871                   mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
1872                   mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
1873                   mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
1874
1875         }
1876
1877         rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
1878         if (rc)
1879                 return rc;
1880
1881         /*
1882          * Now that dimms are successfully registered, and async registration
1883          * is flushed, attempt to enable event notification.
1884          */
1885         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1886                 struct kernfs_node *nfit_kernfs;
1887
1888                 nvdimm = nfit_mem->nvdimm;
1889                 if (!nvdimm)
1890                         continue;
1891
1892                 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
1893                 if (nfit_kernfs)
1894                         nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
1895                                         "flags");
1896                 sysfs_put(nfit_kernfs);
1897                 if (!nfit_mem->flags_attr)
1898                         dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
1899                                         nvdimm_name(nvdimm));
1900         }
1901
1902         return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
1903                         acpi_desc);
1904 }
1905
1906 /*
1907  * These constants are private because there are no kernel consumers of
1908  * these commands.
1909  */
1910 enum nfit_aux_cmds {
1911         NFIT_CMD_TRANSLATE_SPA = 5,
1912         NFIT_CMD_ARS_INJECT_SET = 7,
1913         NFIT_CMD_ARS_INJECT_CLEAR = 8,
1914         NFIT_CMD_ARS_INJECT_GET = 9,
1915 };
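/*
 * A minimal sketch of how these reach firmware, assuming the ndctl-style
 * passthrough: userspace wraps the DSM payload in a struct nd_cmd_pkg
 * with nd_command set to one of the function numbers above and submits
 * it with the ND_CMD_CALL ioctl on the bus device; acpi_nfit_init_dsms()
 * below populates the bus_dsm_mask that such calls are validated against.
 */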
1916
1917 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
1918 {
1919         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1920         const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
1921         struct acpi_device *adev;
1922         unsigned long dsm_mask;
1923         int i;
1924
1925         nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
1926         nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
1927         adev = to_acpi_dev(acpi_desc);
1928         if (!adev)
1929                 return;
1930
1931         for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
1932                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
1933                         set_bit(i, &nd_desc->cmd_mask);
1934         set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
1935
1936         dsm_mask =
1937                 (1 << ND_CMD_ARS_CAP) |
1938                 (1 << ND_CMD_ARS_START) |
1939                 (1 << ND_CMD_ARS_STATUS) |
1940                 (1 << ND_CMD_CLEAR_ERROR) |
1941                 (1 << NFIT_CMD_TRANSLATE_SPA) |
1942                 (1 << NFIT_CMD_ARS_INJECT_SET) |
1943                 (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
1944                 (1 << NFIT_CMD_ARS_INJECT_GET);
1945         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1946                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
1947                         set_bit(i, &nd_desc->bus_dsm_mask);
1948 }
1949
1950 static ssize_t range_index_show(struct device *dev,
1951                 struct device_attribute *attr, char *buf)
1952 {
1953         struct nd_region *nd_region = to_nd_region(dev);
1954         struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
1955
1956         return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
1957 }
1958 static DEVICE_ATTR_RO(range_index);
1959
1960 static ssize_t ecc_unit_size_show(struct device *dev,
1961                 struct device_attribute *attr, char *buf)
1962 {
1963         struct nd_region *nd_region = to_nd_region(dev);
1964         struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
1965
1966         return sprintf(buf, "%d\n", nfit_spa->clear_err_unit);
1967 }
1968 static DEVICE_ATTR_RO(ecc_unit_size);
1969
1970 static struct attribute *acpi_nfit_region_attributes[] = {
1971         &dev_attr_range_index.attr,
1972         &dev_attr_ecc_unit_size.attr,
1973         NULL,
1974 };
1975
1976 static const struct attribute_group acpi_nfit_region_attribute_group = {
1977         .name = "nfit",
1978         .attrs = acpi_nfit_region_attributes,
1979 };
1980
1981 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
1982         &nd_region_attribute_group,
1983         &nd_mapping_attribute_group,
1984         &nd_device_attribute_group,
1985         &nd_numa_attribute_group,
1986         &acpi_nfit_region_attribute_group,
1987         NULL,
1988 };
1989
1990 /* enough info to uniquely specify an interleave set */
1991 struct nfit_set_info {
1992         struct nfit_set_info_map {
1993                 u64 region_offset;
1994                 u32 serial_number;
1995                 u32 pad;
1996         } mapping[0];
1997 };
1998
1999 struct nfit_set_info2 {
2000         struct nfit_set_info_map2 {
2001                 u64 region_offset;
2002                 u32 serial_number;
2003                 u16 vendor_id;
2004                 u16 manufacturing_date;
2005                 u8  manufacturing_location;
2006                 u8  reserved[31];
2007         } mapping[0];
2008 };
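/*
 * The interleave-set cookies below are fletcher64 checksums over one of
 * these arrays after it is sorted by region_offset, so the same set of
 * DIMMs yields the same cookie regardless of the order in which the
 * BIOS listed the memdevs.
 */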
2009
2010 static size_t sizeof_nfit_set_info(int num_mappings)
2011 {
2012         return sizeof(struct nfit_set_info)
2013                 + num_mappings * sizeof(struct nfit_set_info_map);
2014 }
2015
2016 static size_t sizeof_nfit_set_info2(int num_mappings)
2017 {
2018         return sizeof(struct nfit_set_info2)
2019                 + num_mappings * sizeof(struct nfit_set_info_map2);
2020 }
2021
2022 static int cmp_map_compat(const void *m0, const void *m1)
2023 {
2024         const struct nfit_set_info_map *map0 = m0;
2025         const struct nfit_set_info_map *map1 = m1;
2026
2027         return memcmp(&map0->region_offset, &map1->region_offset,
2028                         sizeof(u64));
2029 }
2030
2031 static int cmp_map(const void *m0, const void *m1)
2032 {
2033         const struct nfit_set_info_map *map0 = m0;
2034         const struct nfit_set_info_map *map1 = m1;
2035
2036         if (map0->region_offset < map1->region_offset)
2037                 return -1;
2038         else if (map0->region_offset > map1->region_offset)
2039                 return 1;
2040         return 0;
2041 }
2042
2043 static int cmp_map2(const void *m0, const void *m1)
2044 {
2045         const struct nfit_set_info_map2 *map0 = m0;
2046         const struct nfit_set_info_map2 *map1 = m1;
2047
2048         if (map0->region_offset < map1->region_offset)
2049                 return -1;
2050         else if (map0->region_offset > map1->region_offset)
2051                 return 1;
2052         return 0;
2053 }
2054
2055 /* Retrieve the nth entry referencing this spa */
2056 static struct acpi_nfit_memory_map *memdev_from_spa(
2057                 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
2058 {
2059         struct nfit_memdev *nfit_memdev;
2060
2061         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
2062                 if (nfit_memdev->memdev->range_index == range_index)
2063                         if (n-- == 0)
2064                                 return nfit_memdev->memdev;
2065         return NULL;
2066 }
2067
2068 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2069                 struct nd_region_desc *ndr_desc,
2070                 struct acpi_nfit_system_address *spa)
2071 {
2072         struct device *dev = acpi_desc->dev;
2073         struct nd_interleave_set *nd_set;
2074         u16 nr = ndr_desc->num_mappings;
2075         struct nfit_set_info2 *info2;
2076         struct nfit_set_info *info;
2077         int i;
2078
2079         nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2080         if (!nd_set)
2081                 return -ENOMEM;
2082         ndr_desc->nd_set = nd_set;
2083         guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
2084
2085         info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
2086         if (!info)
2087                 return -ENOMEM;
2088
2089         info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
2090         if (!info2)
2091                 return -ENOMEM;
2092
2093         for (i = 0; i < nr; i++) {
2094                 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
2095                 struct nfit_set_info_map *map = &info->mapping[i];
2096                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2097                 struct nvdimm *nvdimm = mapping->nvdimm;
2098                 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2099                 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
2100                                 spa->range_index, i);
2101                 struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2102
2103                 if (!memdev || !nfit_mem->dcr) {
2104                         dev_err(dev, "%s: failed to find DCR\n", __func__);
2105                         return -ENODEV;
2106                 }
2107
2108                 map->region_offset = memdev->region_offset;
2109                 map->serial_number = dcr->serial_number;
2110
2111                 map2->region_offset = memdev->region_offset;
2112                 map2->serial_number = dcr->serial_number;
2113                 map2->vendor_id = dcr->vendor_id;
2114                 map2->manufacturing_date = dcr->manufacturing_date;
2115                 map2->manufacturing_location = dcr->manufacturing_location;
2116         }
2117
2118         /* v1.1 namespaces */
2119         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2120                         cmp_map, NULL);
2121         nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2122
2123         /* v1.2 namespaces */
2124         sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
2125                         cmp_map2, NULL);
2126         nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
2127
2128         /* support v1.1 namespaces created with the wrong sort order */
2129         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2130                         cmp_map_compat, NULL);
2131         nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2132
2133         /* record the result of the sort for the mapping position */
2134         for (i = 0; i < nr; i++) {
2135                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2136                 int j;
2137
2138                 for (j = 0; j < nr; j++) {
2139                         struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
2140                         struct nvdimm *nvdimm = mapping->nvdimm;
2141                         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2142                         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2143
2144                         if (map2->serial_number == dcr->serial_number &&
2145                             map2->vendor_id == dcr->vendor_id &&
2146                             map2->manufacturing_date == dcr->manufacturing_date &&
2147                             map2->manufacturing_location
2148                                     == dcr->manufacturing_location) {
2149                                 mapping->position = i;
2150                                 break;
2151                         }
2152                 }
2153         }
2154
2155         ndr_desc->nd_set = nd_set;
2156         devm_kfree(dev, info);
2157         devm_kfree(dev, info2);
2158
2159         return 0;
2160 }
2161
2162 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
2163 {
2164         struct acpi_nfit_interleave *idt = mmio->idt;
2165         u32 sub_line_offset, line_index, line_offset;
2166         u64 line_no, table_skip_count, table_offset;
2167
2168         line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
2169         table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
2170         line_offset = idt->line_offset[line_index]
2171                 * mmio->line_size;
2172         table_offset = table_skip_count * mmio->table_size;
2173
2174         return mmio->base_offset + line_offset + table_offset + sub_line_offset;
2175 }
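/*
 * Worked example with assumed values: line_size = 64, num_lines = 2,
 * interleave_ways = 2 (table_size = 2 * 2 * 64 = 256) and
 * line_offset[] = { 0, 2 }.  For offset = 200: line_no = 3,
 * sub_line_offset = 8, table_skip_count = 1, line_index = 1, so the
 * result is base_offset + (2 * 64) + (1 * 256) + 8 = base_offset + 392.
 */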
2176
2177 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
2178 {
2179         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2180         u64 offset = nfit_blk->stat_offset + mmio->size * bw;
2181         const u32 STATUS_MASK = 0x80000037;
2182
2183         if (mmio->num_lines)
2184                 offset = to_interleave_offset(offset, mmio);
2185
2186         return readl(mmio->addr.base + offset) & STATUS_MASK;
2187 }
2188
2189 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
2190                 resource_size_t dpa, unsigned int len, unsigned int write)
2191 {
2192         u64 cmd, offset;
2193         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2194
2195         enum {
2196                 BCW_OFFSET_MASK = (1ULL << 48)-1,
2197                 BCW_LEN_SHIFT = 48,
2198                 BCW_LEN_MASK = (1ULL << 8) - 1,
2199                 BCW_CMD_SHIFT = 56,
2200         };
2201
2202         cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
2203         len = len >> L1_CACHE_SHIFT;
2204         cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
2205         cmd |= ((u64) write) << BCW_CMD_SHIFT;
2206
2207         offset = nfit_blk->cmd_offset + mmio->size * bw;
2208         if (mmio->num_lines)
2209                 offset = to_interleave_offset(offset, mmio);
2210
2211         writeq(cmd, mmio->addr.base + offset);
2212         nvdimm_flush(nfit_blk->nd_region);
2213
2214         if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
2215                 readq(mmio->addr.base + offset);
2216 }
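/*
 * Example encoding, assuming L1_CACHE_SHIFT = 6: a 256-byte write at
 * dpa 0x2000 becomes cmd = (0x2000 >> 6) | ((256 >> 6) << 48) |
 * (1ULL << 56) = 0x0104000000000080, i.e. the cache-line-aligned DPA
 * in bits 0-47, the length in cache lines in bits 48-55, and the write
 * flag in bit 56.
 */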
2217
2218 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
2219                 resource_size_t dpa, void *iobuf, size_t len, int rw,
2220                 unsigned int lane)
2221 {
2222         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2223         unsigned int copied = 0;
2224         u64 base_offset;
2225         int rc;
2226
2227         base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
2228                 + lane * mmio->size;
2229         write_blk_ctl(nfit_blk, lane, dpa, len, rw);
2230         while (len) {
2231                 unsigned int c;
2232                 u64 offset;
2233
2234                 if (mmio->num_lines) {
2235                         u32 line_offset;
2236
2237                         offset = to_interleave_offset(base_offset + copied,
2238                                         mmio);
2239                         div_u64_rem(offset, mmio->line_size, &line_offset);
2240                         c = min_t(size_t, len, mmio->line_size - line_offset);
2241                 } else {
2242                         offset = base_offset + nfit_blk->bdw_offset;
2243                         c = len;
2244                 }
2245
2246                 if (rw)
2247                         memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
2248                 else {
2249                         if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
2250                                 arch_invalidate_pmem((void __force *)
2251                                         mmio->addr.aperture + offset, c);
2252
2253                         memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
2254                 }
2255
2256                 copied += c;
2257                 len -= c;
2258         }
2259
2260         if (rw)
2261                 nvdimm_flush(nfit_blk->nd_region);
2262
2263         rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
2264         return rc;
2265 }
2266
2267 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
2268                 resource_size_t dpa, void *iobuf, u64 len, int rw)
2269 {
2270         struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
2271         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2272         struct nd_region *nd_region = nfit_blk->nd_region;
2273         unsigned int lane, copied = 0;
2274         int rc = 0;
2275
2276         lane = nd_region_acquire_lane(nd_region);
2277         while (len) {
2278                 u64 c = min(len, mmio->size);
2279
2280                 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
2281                                 iobuf + copied, c, rw, lane);
2282                 if (rc)
2283                         break;
2284
2285                 copied += c;
2286                 len -= c;
2287         }
2288         nd_region_release_lane(nd_region, lane);
2289
2290         return rc;
2291 }
2292
2293 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
2294                 struct acpi_nfit_interleave *idt, u16 interleave_ways)
2295 {
2296         if (idt) {
2297                 mmio->num_lines = idt->line_count;
2298                 mmio->line_size = idt->line_size;
2299                 if (interleave_ways == 0)
2300                         return -ENXIO;
2301                 mmio->table_size = mmio->num_lines * interleave_ways
2302                         * mmio->line_size;
2303         }
2304
2305         return 0;
2306 }
2307
2308 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
2309                 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
2310 {
2311         struct nd_cmd_dimm_flags flags;
2312         int rc;
2313
2314         memset(&flags, 0, sizeof(flags));
2315         rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
2316                         sizeof(flags), NULL);
2317
2318         if (rc >= 0 && flags.status == 0)
2319                 nfit_blk->dimm_flags = flags.flags;
2320         else if (rc == -ENOTTY) {
2321                 /* fall back to a conservative default */
2322                 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
2323                 rc = 0;
2324         } else
2325                 rc = -ENXIO;
2326
2327         return rc;
2328 }
2329
2330 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2331                 struct device *dev)
2332 {
2333         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
2334         struct nd_blk_region *ndbr = to_nd_blk_region(dev);
2335         struct nfit_blk_mmio *mmio;
2336         struct nfit_blk *nfit_blk;
2337         struct nfit_mem *nfit_mem;
2338         struct nvdimm *nvdimm;
2339         int rc;
2340
2341         nvdimm = nd_blk_region_to_dimm(ndbr);
2342         nfit_mem = nvdimm_provider_data(nvdimm);
2343         if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
2344                 dev_dbg(dev, "missing%s%s%s\n",
2345                                 nfit_mem ? "" : " nfit_mem",
2346                                 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
2347                                 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
2348                 return -ENXIO;
2349         }
2350
2351         nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
2352         if (!nfit_blk)
2353                 return -ENOMEM;
2354         nd_blk_region_set_provider_data(ndbr, nfit_blk);
2355         nfit_blk->nd_region = to_nd_region(dev);
2356
2357         /* map block aperture memory */
2358         nfit_blk->bdw_offset = nfit_mem->bdw->offset;
2359         mmio = &nfit_blk->mmio[BDW];
2360         mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
2361                         nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
2362         if (!mmio->addr.base) {
2363                 dev_dbg(dev, "%s failed to map bdw\n",
2364                                 nvdimm_name(nvdimm));
2365                 return -ENOMEM;
2366         }
2367         mmio->size = nfit_mem->bdw->size;
2368         mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
2369         mmio->idt = nfit_mem->idt_bdw;
2370         mmio->spa = nfit_mem->spa_bdw;
2371         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
2372                         nfit_mem->memdev_bdw->interleave_ways);
2373         if (rc) {
2374                 dev_dbg(dev, "%s failed to init bdw interleave\n",
2375                                 nvdimm_name(nvdimm));
2376                 return rc;
2377         }
2378
2379         /* map block control memory */
2380         nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
2381         nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
2382         mmio = &nfit_blk->mmio[DCR];
2383         mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
2384                         nfit_mem->spa_dcr->length);
2385         if (!mmio->addr.base) {
2386                 dev_dbg(dev, "%s failed to map dcr\n",
2387                                 nvdimm_name(nvdimm));
2388                 return -ENOMEM;
2389         }
2390         mmio->size = nfit_mem->dcr->window_size;
2391         mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
2392         mmio->idt = nfit_mem->idt_dcr;
2393         mmio->spa = nfit_mem->spa_dcr;
2394         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
2395                         nfit_mem->memdev_dcr->interleave_ways);
2396         if (rc) {
2397                 dev_dbg(dev, "%s failed to init dcr interleave\n",
2398                                 nvdimm_name(nvdimm));
2399                 return rc;
2400         }
2401
2402         rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2403         if (rc < 0) {
2404                 dev_dbg(dev, "%s failed to get DIMM flags\n",
2405                                 nvdimm_name(nvdimm));
2406                 return rc;
2407         }
2408
2409         if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
2410                 dev_warn(dev, "unable to guarantee persistence of writes\n");
2411
2412         if (mmio->line_size == 0)
2413                 return 0;
2414
2415         if ((u32) nfit_blk->cmd_offset % mmio->line_size
2416                         + 8 > mmio->line_size) {
2417                 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2418                 return -ENXIO;
2419         } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2420                         + 8 > mmio->line_size) {
2421                 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2422                 return -ENXIO;
2423         }
2424
2425         return 0;
2426 }
2427
2428 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2429                 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2430 {
2431         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2432         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2433         int cmd_rc, rc;
2434
2435         cmd->address = spa->address;
2436         cmd->length = spa->length;
2437         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2438                         sizeof(*cmd), &cmd_rc);
2439         if (rc < 0)
2440                 return rc;
2441         return cmd_rc;
2442 }
2443
2444 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
2445 {
2446         int rc;
2447         int cmd_rc;
2448         struct nd_cmd_ars_start ars_start;
2449         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2450         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2451
2452         memset(&ars_start, 0, sizeof(ars_start));
2453         ars_start.address = spa->address;
2454         ars_start.length = spa->length;
2455         ars_start.flags = acpi_desc->ars_start_flags;
2456         if (nfit_spa_type(spa) == NFIT_SPA_PM)
2457                 ars_start.type = ND_ARS_PERSISTENT;
2458         else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2459                 ars_start.type = ND_ARS_VOLATILE;
2460         else
2461                 return -ENOTTY;
2462
2463         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2464                         sizeof(ars_start), &cmd_rc);
2465
2466         if (rc < 0)
2467                 return rc;
2468         return cmd_rc;
2469 }
2470
2471 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2472 {
2473         int rc, cmd_rc;
2474         struct nd_cmd_ars_start ars_start;
2475         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2476         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2477
2478         memset(&ars_start, 0, sizeof(ars_start));
2479         ars_start.address = ars_status->restart_address;
2480         ars_start.length = ars_status->restart_length;
2481         ars_start.type = ars_status->type;
2482         ars_start.flags = acpi_desc->ars_start_flags;
2483         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2484                         sizeof(ars_start), &cmd_rc);
2485         if (rc < 0)
2486                 return rc;
2487         return cmd_rc;
2488 }
2489
2490 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2491 {
2492         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2493         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2494         int rc, cmd_rc;
2495
2496         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2497                         acpi_desc->ars_status_size, &cmd_rc);
2498         if (rc < 0)
2499                 return rc;
2500         return cmd_rc;
2501 }
2502
2503 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
2504                 struct nd_cmd_ars_status *ars_status)
2505 {
2506         struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2507         int rc;
2508         u32 i;
2509
2510         /*
2511          * The first record starts at a 44-byte offset from the start
2512          * of the payload.
2513          */
2514         if (ars_status->out_length < 44)
2515                 return 0;
2516         for (i = 0; i < ars_status->num_records; i++) {
2517                 /* only process full records */
2518                 if (ars_status->out_length
2519                                 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2520                         break;
2521                 rc = nvdimm_bus_add_badrange(nvdimm_bus,
2522                                 ars_status->records[i].err_address,
2523                                 ars_status->records[i].length);
2524                 if (rc)
2525                         return rc;
2526         }
2527         if (i < ars_status->num_records)
2528                 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2529
2530         return 0;
2531 }
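/*
 * For example, an ars_status payload with out_length = 44 + 2 * 24 = 92
 * holds exactly two 24-byte nd_ars_record entries after the 44-byte
 * header, so both are added to the badrange list; out_length = 80 with
 * num_records = 2 would process only the first record and emit the
 * truncation warning.
 */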
2532
2533 static void acpi_nfit_remove_resource(void *data)
2534 {
2535         struct resource *res = data;
2536
2537         remove_resource(res);
2538 }
2539
2540 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2541                 struct nd_region_desc *ndr_desc)
2542 {
2543         struct resource *res, *nd_res = ndr_desc->res;
2544         int is_pmem, ret;
2545
2546         /* No operation if the region is already registered as PMEM */
2547         is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2548                                 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2549         if (is_pmem == REGION_INTERSECTS)
2550                 return 0;
2551
2552         res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2553         if (!res)
2554                 return -ENOMEM;
2555
2556         res->name = "Persistent Memory";
2557         res->start = nd_res->start;
2558         res->end = nd_res->end;
2559         res->flags = IORESOURCE_MEM;
2560         res->desc = IORES_DESC_PERSISTENT_MEMORY;
2561
2562         ret = insert_resource(&iomem_resource, res);
2563         if (ret)
2564                 return ret;
2565
2566         ret = devm_add_action_or_reset(acpi_desc->dev,
2567                                         acpi_nfit_remove_resource,
2568                                         res);
2569         if (ret)
2570                 return ret;
2571
2572         return 0;
2573 }
2574
2575 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2576                 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2577                 struct acpi_nfit_memory_map *memdev,
2578                 struct nfit_spa *nfit_spa)
2579 {
2580         struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2581                         memdev->device_handle);
2582         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2583         struct nd_blk_region_desc *ndbr_desc;
2584         struct nfit_mem *nfit_mem;
2585         int rc;
2586
2587         if (!nvdimm) {
2588                 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2589                                 spa->range_index, memdev->device_handle);
2590                 return -ENODEV;
2591         }
2592
2593         mapping->nvdimm = nvdimm;
2594         switch (nfit_spa_type(spa)) {
2595         case NFIT_SPA_PM:
2596         case NFIT_SPA_VOLATILE:
2597                 mapping->start = memdev->address;
2598                 mapping->size = memdev->region_size;
2599                 break;
2600         case NFIT_SPA_DCR:
2601                 nfit_mem = nvdimm_provider_data(nvdimm);
2602                 if (!nfit_mem || !nfit_mem->bdw) {
2603                         dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2604                                         spa->range_index, nvdimm_name(nvdimm));
2605                         break;
2606                 }
2607
2608                 mapping->size = nfit_mem->bdw->capacity;
2609                 mapping->start = nfit_mem->bdw->start_address;
2610                 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2611                 ndr_desc->mapping = mapping;
2612                 ndr_desc->num_mappings = 1;
2613                 ndbr_desc = to_blk_region_desc(ndr_desc);
2614                 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2615                 ndbr_desc->do_io = acpi_desc->blk_do_io;
2616                 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2617                 if (rc)
2618                         return rc;
2619                 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2620                                 ndr_desc);
2621                 if (!nfit_spa->nd_region)
2622                         return -ENOMEM;
2623                 break;
2624         }
2625
2626         return 0;
2627 }
2628
2629 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2630 {
2631         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2632                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2633                 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2634                 nfit_spa_type(spa) == NFIT_SPA_PCD);
2635 }
2636
2637 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2638 {
2639         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2640                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2641                 nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2642 }
2643
2644 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2645                 struct nfit_spa *nfit_spa)
2646 {
2647         static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2648         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2649         struct nd_blk_region_desc ndbr_desc;
2650         struct nd_region_desc *ndr_desc;
2651         struct nfit_memdev *nfit_memdev;
2652         struct nvdimm_bus *nvdimm_bus;
2653         struct resource res;
2654         int count = 0, rc;
2655
2656         if (nfit_spa->nd_region)
2657                 return 0;
2658
2659         if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2660                 dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
2661                 return 0;
2662         }
2663
2664         memset(&res, 0, sizeof(res));
2665         memset(&mappings, 0, sizeof(mappings));
2666         memset(&ndbr_desc, 0, sizeof(ndbr_desc));
2667         res.start = spa->address;
2668         res.end = res.start + spa->length - 1;
2669         ndr_desc = &ndbr_desc.ndr_desc;
2670         ndr_desc->res = &res;
2671         ndr_desc->provider_data = nfit_spa;
2672         ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2673         if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2674                 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2675                                                 spa->proximity_domain);
2676         else
2677                 ndr_desc->numa_node = NUMA_NO_NODE;
2678
2679         if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
2680                 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
2681
2682         if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2683                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
2684
2685         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2686                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2687                 struct nd_mapping_desc *mapping;
2688
2689                 if (memdev->range_index != spa->range_index)
2690                         continue;
2691                 if (count >= ND_MAX_MAPPINGS) {
2692                         dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2693                                         spa->range_index, ND_MAX_MAPPINGS);
2694                         return -ENXIO;
2695                 }
2696                 mapping = &mappings[count++];
2697                 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
2698                                 memdev, nfit_spa);
2699                 if (rc)
2700                         goto out;
2701         }
2702
2703         ndr_desc->mapping = mappings;
2704         ndr_desc->num_mappings = count;
2705         rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2706         if (rc)
2707                 goto out;
2708
2709         nvdimm_bus = acpi_desc->nvdimm_bus;
2710         if (nfit_spa_type(spa) == NFIT_SPA_PM) {
2711                 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
2712                 if (rc) {
2713                         dev_warn(acpi_desc->dev,
2714                                 "failed to insert pmem resource to iomem: %d\n",
2715                                 rc);
2716                         goto out;
2717                 }
2718
2719                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2720                                 ndr_desc);
2721                 if (!nfit_spa->nd_region)
2722                         rc = -ENOMEM;
2723         } else if (nfit_spa_is_volatile(spa)) {
2724                 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2725                                 ndr_desc);
2726                 if (!nfit_spa->nd_region)
2727                         rc = -ENOMEM;
2728         } else if (nfit_spa_is_virtual(spa)) {
2729                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2730                                 ndr_desc);
2731                 if (!nfit_spa->nd_region)
2732                         rc = -ENOMEM;
2733         }
2734
2735  out:
2736         if (rc)
2737                 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2738                                 nfit_spa->spa->range_index);
2739         return rc;
2740 }
2741
2742 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
2743                 u32 max_ars)
2744 {
2745         struct device *dev = acpi_desc->dev;
2746         struct nd_cmd_ars_status *ars_status;
2747
2748         if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
2749                 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
2750                 return 0;
2751         }
2752
2753         if (acpi_desc->ars_status)
2754                 devm_kfree(dev, acpi_desc->ars_status);
2755         acpi_desc->ars_status = NULL;
2756         ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2757         if (!ars_status)
2758                 return -ENOMEM;
2759         acpi_desc->ars_status = ars_status;
2760         acpi_desc->ars_status_size = max_ars;
2761         return 0;
2762 }
2763
2764 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
2765                 struct nfit_spa *nfit_spa)
2766 {
2767         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2768         int rc;
2769
2770         if (!nfit_spa->max_ars) {
2771                 struct nd_cmd_ars_cap ars_cap;
2772
2773                 memset(&ars_cap, 0, sizeof(ars_cap));
2774                 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2775                 if (rc < 0)
2776                         return rc;
2777                 nfit_spa->max_ars = ars_cap.max_ars_out;
2778                 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2779                 /* check that the supported scrub types match the spa type */
2780                 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
2781                                 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
2782                         return -ENOTTY;
2783                 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
2784                                 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
2785                         return -ENOTTY;
2786         }
2787
2788         if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
2789                 return -ENOMEM;
2790
2791         rc = ars_get_status(acpi_desc);
2792         if (rc < 0 && rc != -ENOSPC)
2793                 return rc;
2794
2795         if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
2796                 return -ENOMEM;
2797
2798         return 0;
2799 }
2800
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
                struct nfit_spa *nfit_spa)
{
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        unsigned int overflow_retry = scrub_overflow_abort;
        u64 init_ars_start = 0, init_ars_len = 0;
        struct device *dev = acpi_desc->dev;
        unsigned int tmo = scrub_timeout;
        int rc;

        if (!test_bit(ARS_REQ, &nfit_spa->ars_state) || !nfit_spa->nd_region)
                return;

        rc = ars_start(acpi_desc, nfit_spa);
        /*
         * If we timed out the initial scan we'll still be busy here,
         * and will wait another timeout before giving up permanently.
         */
        if (rc < 0 && rc != -EBUSY)
                return;

        do {
                u64 ars_start, ars_len;

                if (acpi_desc->cancel)
                        break;
                rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
                if (rc == -ENOTTY)
                        break;
                if (rc == -EBUSY && !tmo) {
                        dev_warn(dev, "range %d ars timeout, aborting\n",
                                        spa->range_index);
                        break;
                }

                if (rc == -EBUSY) {
                        /*
                         * Note, entries may be appended to the list
                         * while the lock is dropped, but the workqueue
                         * being active prevents entries being deleted /
                         * freed.
                         */
                        mutex_unlock(&acpi_desc->init_mutex);
                        ssleep(1);
                        tmo--;
                        mutex_lock(&acpi_desc->init_mutex);
                        continue;
                }

                /* we got some results, but there are more pending... */
                if (rc == -ENOSPC && overflow_retry--) {
                        if (!init_ars_len) {
                                init_ars_len = acpi_desc->ars_status->length;
                                init_ars_start = acpi_desc->ars_status->address;
                        }
                        rc = ars_continue(acpi_desc);
                }

                if (rc < 0) {
                        dev_warn(dev, "range %d ars continuation failed\n",
                                        spa->range_index);
                        break;
                }

                if (init_ars_len) {
                        ars_start = init_ars_start;
                        ars_len = init_ars_len;
                } else {
                        ars_start = acpi_desc->ars_status->address;
                        ars_len = acpi_desc->ars_status->length;
                }
                dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
                                spa->range_index, ars_start, ars_len);
                /* notify the region about new poison entries */
                nvdimm_region_notify(nfit_spa->nd_region,
                                NVDIMM_REVALIDATE_POISON);
                break;
        } while (1);
}

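/*
 * Main scrub work callback.  nfit_wq is a singlethread workqueue, so
 * at most one instance of this function runs at a time, and
 * acpi_nfit_clear_to_send() fences off userspace ARS starts while
 * this work is busy.
 */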
static void acpi_nfit_scrub(struct work_struct *work)
{
        struct device *dev;
        u64 init_scrub_length = 0;
        struct nfit_spa *nfit_spa;
        u64 init_scrub_address = 0;
        bool init_ars_done = false;
        struct acpi_nfit_desc *acpi_desc;
        unsigned int tmo = scrub_timeout;
        unsigned int overflow_retry = scrub_overflow_abort;

        acpi_desc = container_of(work, typeof(*acpi_desc), work);
        dev = acpi_desc->dev;

        /*
         * We scrub in two phases.  The first phase waits for any platform
         * firmware initiated scrubs to complete, then searches the affected
         * spa regions to mark them scanned.  In the second phase we initiate
         * a directed scrub for every range that was not scrubbed in phase 1.
         * If we're called for a 'rescan', we harmlessly pass through the
         * first phase, but really only care about running phase 2, where
         * regions can be notified of new poison.
         */

        /* process platform firmware initiated scrubs */
 retry:
        mutex_lock(&acpi_desc->init_mutex);
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                struct nd_cmd_ars_status *ars_status;
                struct acpi_nfit_system_address *spa;
                u64 ars_start, ars_len;
                int rc;

                if (acpi_desc->cancel)
                        break;

                if (nfit_spa->nd_region)
                        continue;

                if (init_ars_done) {
                        /*
                         * No need to re-query, we're now just
                         * reconciling all the ranges covered by the
                         * initial scrub
                         */
                        rc = 0;
                } else
                        rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

                if (rc == -ENOTTY) {
                        /* no ars capability, just register spa and move on */
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
                        continue;
                }

                if (rc == -EBUSY && !tmo) {
                        /* fallthrough to directed scrub in phase 2 */
                        dev_warn(dev, "timeout awaiting ars results, continuing...\n");
                        break;
                } else if (rc == -EBUSY) {
                        mutex_unlock(&acpi_desc->init_mutex);
                        ssleep(1);
                        tmo--;
                        goto retry;
                }

                /* we got some results, but there are more pending... */
                if (rc == -ENOSPC && overflow_retry--) {
                        ars_status = acpi_desc->ars_status;
                        /*
                         * Record the original scrub range, so that we
                         * can recall all the ranges impacted by the
                         * initial scrub.
                         */
                        if (!init_scrub_length) {
                                init_scrub_length = ars_status->length;
                                init_scrub_address = ars_status->address;
                        }
                        rc = ars_continue(acpi_desc);
                        if (rc == 0) {
                                mutex_unlock(&acpi_desc->init_mutex);
                                goto retry;
                        }
                }

                if (rc < 0) {
                        /*
                         * Initial scrub failed, we'll give it one more
                         * try below...
                         */
                        break;
                }

                /* We got some final results, record completed ranges */
                ars_status = acpi_desc->ars_status;
                if (init_scrub_length) {
                        ars_start = init_scrub_address;
                        ars_len = init_scrub_length;
                } else {
                        ars_start = ars_status->address;
                        ars_len = ars_status->length;
                }
                spa = nfit_spa->spa;

                if (!init_ars_done) {
                        init_ars_done = true;
                        dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
                                        ars_start, ars_len);
                }
                if (ars_start <= spa->address && ars_start + ars_len
                                >= spa->address + spa->length)
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
        }

        /*
         * For all the ranges not covered by an initial scrub we still
         * want to see if there are errors, but it's ok to discover them
         * asynchronously.
         */
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                /*
                 * Flag all the ranges that still need scrubbing, but
                 * register them now to make data available.
                 */
                if (!nfit_spa->nd_region) {
                        set_bit(ARS_REQ, &nfit_spa->ars_state);
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
                }
        }
        acpi_desc->init_complete = 1;

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
                acpi_nfit_async_scrub(acpi_desc, nfit_spa);
        acpi_desc->scrub_count++;
        acpi_desc->ars_start_flags = 0;
        if (acpi_desc->scrub_count_state)
                sysfs_notify_dirent(acpi_desc->scrub_count_state);
        mutex_unlock(&acpi_desc->init_mutex);
}

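/*
 * Register BLK regions immediately, but defer PMEM and volatile
 * ranges to the scrub workqueue so that initial ARS results can be
 * reflected in a region's badblocks before namespaces are surfaced.
 */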
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
        struct nfit_spa *nfit_spa;

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                int rc, type = nfit_spa_type(nfit_spa->spa);

                /* PMEM and VMEM will be registered by the ARS workqueue */
                if (type == NFIT_SPA_PM || type == NFIT_SPA_VOLATILE)
                        continue;
                /* BLK apertures belong to BLK region registration below */
                if (type == NFIT_SPA_BDW)
                        continue;
                /* BLK regions don't need to wait for ARS results */
                rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
                if (rc)
                        return rc;
        }

        acpi_desc->ars_start_flags = 0;
        if (!acpi_desc->cancel)
                queue_work(nfit_wq, &acpi_desc->work);
        return 0;
}

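/*
 * Any table still on a @prev list was not matched by the updated
 * NFIT, i.e. the new table deletes entries.  Deletion is not
 * supported, so fail the update.
 */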
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev)
{
        struct device *dev = acpi_desc->dev;

        if (!list_empty(&prev->spas) ||
                        !list_empty(&prev->memdevs) ||
                        !list_empty(&prev->dcrs) ||
                        !list_empty(&prev->bdws) ||
                        !list_empty(&prev->idts) ||
                        !list_empty(&prev->flushes)) {
                dev_err(dev, "new nfit deletes entries (unsupported)\n");
                return -ENXIO;
        }
        return 0;
}

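/*
 * Cache a kernfs reference to the bus-level 'nfit/scrub' attribute
 * so scrub completions can be signalled to poll(2) waiters via
 * sysfs_notify_dirent() without re-resolving the path each time.
 */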
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
        struct device *dev = acpi_desc->dev;
        struct kernfs_node *nfit;
        struct device *bus_dev;

        if (!ars_supported(acpi_desc->nvdimm_bus))
                return 0;

        bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
        nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
        if (!nfit) {
                dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
                return -ENODEV;
        }
        acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
        sysfs_put(nfit);
        if (!acpi_desc->scrub_count_state) {
                dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
                return -ENODEV;
        }

        return 0;
}

static void acpi_nfit_unregister(void *data)
{
        struct acpi_nfit_desc *acpi_desc = data;

        nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}

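/*
 * Parse an NFIT (or _FIT) byte stream and merge it with previously
 * seen tables: first-time callers get a freshly registered nvdimm
 * bus, then the parsed tables are diffed against the prior snapshot
 * before dimms and regions are (re)registered.
 */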
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_table_prev prev;
        const void *end;
        int rc;

        if (!acpi_desc->nvdimm_bus) {
                acpi_nfit_init_dsms(acpi_desc);

                acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
                                &acpi_desc->nd_desc);
                if (!acpi_desc->nvdimm_bus)
                        return -ENOMEM;

                rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
                                acpi_desc);
                if (rc)
                        return rc;

                rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
                if (rc)
                        return rc;

                /* register this acpi_desc for mce notifications */
                mutex_lock(&acpi_desc_lock);
                list_add_tail(&acpi_desc->list, &acpi_descs);
                mutex_unlock(&acpi_desc_lock);
        }

        mutex_lock(&acpi_desc->init_mutex);

        INIT_LIST_HEAD(&prev.spas);
        INIT_LIST_HEAD(&prev.memdevs);
        INIT_LIST_HEAD(&prev.dcrs);
        INIT_LIST_HEAD(&prev.bdws);
        INIT_LIST_HEAD(&prev.idts);
        INIT_LIST_HEAD(&prev.flushes);

        list_cut_position(&prev.spas, &acpi_desc->spas,
                                acpi_desc->spas.prev);
        list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
                                acpi_desc->memdevs.prev);
        list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
                                acpi_desc->dcrs.prev);
        list_cut_position(&prev.bdws, &acpi_desc->bdws,
                                acpi_desc->bdws.prev);
        list_cut_position(&prev.idts, &acpi_desc->idts,
                                acpi_desc->idts.prev);
        list_cut_position(&prev.flushes, &acpi_desc->flushes,
                                acpi_desc->flushes.prev);

        end = data + sz;
        while (!IS_ERR_OR_NULL(data))
                data = add_table(acpi_desc, &prev, data, end);

        if (IS_ERR(data)) {
                dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
                rc = PTR_ERR(data);
                goto out_unlock;
        }

        rc = acpi_nfit_check_deletions(acpi_desc, &prev);
        if (rc)
                goto out_unlock;

        rc = nfit_mem_init(acpi_desc);
        if (rc)
                goto out_unlock;

        rc = acpi_nfit_register_dimms(acpi_desc);
        if (rc)
                goto out_unlock;

        rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
        mutex_unlock(&acpi_desc->init_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

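/*
 * Sentinel work item used to flush nfit_wq: by the time it runs, all
 * scrub work queued before it has completed.
 */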
struct acpi_nfit_flush_work {
        struct work_struct work;
        struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
        struct acpi_nfit_flush_work *flush;

        flush = container_of(work, typeof(*flush), work);
        complete(&flush->cmp);
}

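/*
 * Hold off nvdimm-bus probing until in-flight NFIT registration and
 * the initial scrub have settled.  Rather than waiting on the scrub
 * work directly, queue a sentinel work item and wait for it
 * interruptibly so that an impatient userspace can bail out.
 */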
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_flush_work flush;
        int rc;

        /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
        device_lock(dev);
        device_unlock(dev);

        /* bounce the init_mutex to make init_complete valid */
        mutex_lock(&acpi_desc->init_mutex);
        if (acpi_desc->cancel || acpi_desc->init_complete) {
                mutex_unlock(&acpi_desc->init_mutex);
                return 0;
        }

        /*
         * Scrub work could take tens of seconds; userspace may give up,
         * so we need to be interruptible while waiting.
         */
        INIT_WORK_ONSTACK(&flush.work, flush_probe);
        init_completion(&flush.cmp);
        queue_work(nfit_wq, &flush.work);
        mutex_unlock(&acpi_desc->init_mutex);

        rc = wait_for_completion_interruptible(&flush.cmp);
        cancel_work_sync(&flush.work);
        return rc;
}

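/*
 * Gatekeeper for userspace bus-level commands: refuse ARS_START
 * while the kernel's own scrub work is running so that an in-kernel
 * ARS is never clobbered by an intervening start request.
 */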
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

        if (nvdimm)
                return 0;
        if (cmd != ND_CMD_ARS_START)
                return 0;

        /*
         * The kernel and userspace may race to initiate a scrub, but
         * the scrub thread is prepared to lose that initial race.  It
         * just needs guarantees that any ARS it initiates is not
         * interrupted by any intervening start requests from userspace.
         */
        if (work_busy(&acpi_desc->work))
                return -EBUSY;

        return 0;
}

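/*
 * Request a new ARS pass over all persistent-memory ranges.  Invoked
 * from the uncorrectable-error notification path and from the bus
 * 'scrub' sysfs attribute, e.g. (illustrative, the ndbus index
 * varies by system):
 *
 *   echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
 *
 * ARS_SHORT in @flags asks the platform to return previously
 * recorded results instead of performing a full-media scrub.
 */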
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_spa *nfit_spa;

        if (work_busy(&acpi_desc->work))
                return -EBUSY;

        mutex_lock(&acpi_desc->init_mutex);
        if (acpi_desc->cancel) {
                mutex_unlock(&acpi_desc->init_mutex);
                return 0;
        }

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                struct acpi_nfit_system_address *spa = nfit_spa->spa;

                if (nfit_spa_type(spa) != NFIT_SPA_PM)
                        continue;

                set_bit(ARS_REQ, &nfit_spa->ars_state);
        }
        acpi_desc->ars_start_flags = 0;
        if (test_bit(ARS_SHORT, &flags))
                acpi_desc->ars_start_flags |= ND_ARS_RETURN_PREV_DATA;
        queue_work(nfit_wq, &acpi_desc->work);
        dev_dbg(dev, "ars_scan triggered\n");
        mutex_unlock(&acpi_desc->init_mutex);

        return 0;
}

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
        struct nvdimm_bus_descriptor *nd_desc;

        dev_set_drvdata(dev, acpi_desc);
        acpi_desc->dev = dev;
        acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
        nd_desc = &acpi_desc->nd_desc;
        nd_desc->provider_name = "ACPI.NFIT";
        nd_desc->module = THIS_MODULE;
        nd_desc->ndctl = acpi_nfit_ctl;
        nd_desc->flush_probe = acpi_nfit_flush_probe;
        nd_desc->clear_to_send = acpi_nfit_clear_to_send;
        nd_desc->attr_groups = acpi_nfit_attribute_groups;

        INIT_LIST_HEAD(&acpi_desc->spas);
        INIT_LIST_HEAD(&acpi_desc->dcrs);
        INIT_LIST_HEAD(&acpi_desc->bdws);
        INIT_LIST_HEAD(&acpi_desc->idts);
        INIT_LIST_HEAD(&acpi_desc->flushes);
        INIT_LIST_HEAD(&acpi_desc->memdevs);
        INIT_LIST_HEAD(&acpi_desc->dimms);
        INIT_LIST_HEAD(&acpi_desc->list);
        mutex_init(&acpi_desc->init_mutex);
        INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static void acpi_nfit_put_table(void *table)
{
        acpi_put_table(table);
}

void acpi_nfit_shutdown(void *data)
{
        struct acpi_nfit_desc *acpi_desc = data;
        struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

        /*
         * Destruct under acpi_desc_lock so that nfit_handle_mce does not
         * race with teardown.
         */
        mutex_lock(&acpi_desc_lock);
        list_del(&acpi_desc->list);
        mutex_unlock(&acpi_desc_lock);

        mutex_lock(&acpi_desc->init_mutex);
        acpi_desc->cancel = 1;
        mutex_unlock(&acpi_desc->init_mutex);

        /*
         * Bounce the nvdimm bus lock to make sure any in-flight
         * acpi_nfit_ars_rescan() submissions have had a chance to
         * either submit or see ->cancel set.
         */
        device_lock(bus_dev);
        device_unlock(bus_dev);

        flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);

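/*
 * Boot / hotplug entry point for the ACPI0012 root device: locate
 * the static NFIT, prefer a dynamic _FIT if the platform provides
 * one, and hand the resulting byte stream to acpi_nfit_init().
 */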
static int acpi_nfit_add(struct acpi_device *adev)
{
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_nfit_desc *acpi_desc;
        struct device *dev = &adev->dev;
        struct acpi_table_header *tbl;
        acpi_status status = AE_OK;
        acpi_size sz;
        int rc = 0;

        status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
        if (ACPI_FAILURE(status)) {
                /* This is ok, we could have an nvdimm hotplugged later */
                dev_dbg(dev, "failed to find NFIT at startup\n");
                return 0;
        }

        rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
        if (rc)
                return rc;
        sz = tbl->length;

        acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
        if (!acpi_desc)
                return -ENOMEM;
        acpi_nfit_desc_init(acpi_desc, &adev->dev);

        /* Save the acpi header for exporting the revision via sysfs */
        acpi_desc->acpi_header = *tbl;

        /* Evaluate _FIT and override with that if present */
        status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
        if (ACPI_SUCCESS(status) && buf.length > 0) {
                union acpi_object *obj = buf.pointer;

                if (obj->type == ACPI_TYPE_BUFFER)
                        rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
                                        obj->buffer.length);
                else
                        dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
                                (int) obj->type);
                kfree(buf.pointer);
        } else
                /* skip over the lead-in header table */
                rc = acpi_nfit_init(acpi_desc, (void *) tbl
                                + sizeof(struct acpi_table_nfit),
                                sz - sizeof(struct acpi_table_nfit));

        if (rc)
                return rc;
        return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
        /* see acpi_nfit_unregister */
        return 0;
}

static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
        struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        acpi_status status;
        int ret;

        if (!dev->driver) {
                /* dev->driver may be null if we're being removed */
                dev_dbg(dev, "no driver found for dev\n");
                return;
        }

        if (!acpi_desc) {
                acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
                if (!acpi_desc)
                        return;
                acpi_nfit_desc_init(acpi_desc, dev);
        } else {
                /*
                 * Finish previous registration before considering new
                 * regions.
                 */
                flush_workqueue(nfit_wq);
        }

        /* Evaluate _FIT */
        status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "failed to evaluate _FIT\n");
                return;
        }

        obj = buf.pointer;
        if (obj->type == ACPI_TYPE_BUFFER) {
                ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
                                obj->buffer.length);
                if (ret)
                        dev_err(dev, "failed to merge updated NFIT\n");
        } else
                dev_err(dev, "Invalid _FIT\n");
        kfree(buf.pointer);
}

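/*
 * On an uncorrectable-memory-error notification, request a short ARS
 * (previously recorded results only) unless the user selected full
 * re-scrubs via scrub_mode == HW_ERROR_SCRUB_ON.
 */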
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
        struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
        unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
                        0 : 1 << ARS_SHORT;

        acpi_nfit_ars_rescan(acpi_desc, flags);
}

void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
        dev_dbg(dev, "event: 0x%x\n", event);

        switch (event) {
        case NFIT_NOTIFY_UPDATE:
                return acpi_nfit_update_notify(dev, handle);
        case NFIT_NOTIFY_UC_MEMORY_ERROR:
                return acpi_nfit_uc_error_notify(dev, handle);
        default:
                return;
        }
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
        device_lock(&adev->dev);
        __acpi_nfit_notify(&adev->dev, adev->handle, event);
        device_unlock(&adev->dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
        { "ACPI0012", 0 },
        { "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
        .name = KBUILD_MODNAME,
        .ids = acpi_nfit_ids,
        .ops = {
                .add = acpi_nfit_add,
                .remove = acpi_nfit_remove,
                .notify = acpi_nfit_notify,
        },
};

static __init int nfit_init(void)
{
        int ret;

        BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

        guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
        guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
        guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
        guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
        guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
        guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
        guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
        guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
        guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
        guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
        guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
        guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
        guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

        nfit_wq = create_singlethread_workqueue("nfit");
        if (!nfit_wq)
                return -ENOMEM;

        nfit_mce_register();
        ret = acpi_bus_register_driver(&acpi_nfit_driver);
        if (ret) {
                nfit_mce_unregister();
                destroy_workqueue(nfit_wq);
        }

        return ret;
}

static __exit void nfit_exit(void)
{
        nfit_mce_unregister();
        acpi_bus_unregister_driver(&acpi_nfit_driver);
        destroy_workqueue(nfit_wq);
        WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");