acpi, nfit: allow specifying a default DSM family
[linux-2.6-block.git] / drivers / acpi / nfit / core.c
CommitLineData
b94d5230
DW
1/*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/list_sort.h>
14#include <linux/libnvdimm.h>
15#include <linux/module.h>
047fc8a1 16#include <linux/mutex.h>
62232e45 17#include <linux/ndctl.h>
37b137ff 18#include <linux/sysfs.h>
0caeef63 19#include <linux/delay.h>
b94d5230
DW
20#include <linux/list.h>
21#include <linux/acpi.h>
eaf96153 22#include <linux/sort.h>
c2ad2954 23#include <linux/pmem.h>
047fc8a1 24#include <linux/io.h>
1cf03c00 25#include <linux/nd.h>
96601adb 26#include <asm/cacheflush.h>
b94d5230
DW
27#include "nfit.h"
28
047fc8a1
RZ
29/*
30 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
31 * irrelevant.
32 */
2f8e2c87 33#include <linux/io-64-nonatomic-hi-lo.h>
047fc8a1 34
4d88a97a
DW
/* Module tunables and file-scope global state for the NFIT driver. */

/* Bind DIMM devices even when ACPI _STA reports them disabled/absent. */
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

/* Seconds to wait for an initial address-range-scrub to complete. */
static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set\n");

/* Non-zero overrides the DSM function mask advertised by the platform. */
static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

/* -1 keeps the built-in probe order when identifying the DSM family. */
static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

/* All registered acpi_nfit_desc instances, protected by acpi_desc_lock. */
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

/* Workqueue used for deferred/async NFIT work (e.g. scrubbing). */
static struct workqueue_struct *nfit_wq;
67
20985164
VV
/*
 * Snapshot of the NFIT sub-table lists from the previous parse; entries
 * that reappear unchanged during a rescan are moved back to the live
 * lists instead of being reallocated.
 */
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};
76
b94d5230
DW
/* Byte representations of the UUIDs that identify NFIT range/device types. */
static u8 nfit_uuid[NFIT_UUID_MAX][16];

/* Return the 16-byte UUID for the given NFIT UUID identifier. */
const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);
b94d5230 84
62232e45
DW
/* Recover the NFIT-private descriptor embedding a generic bus descriptor. */
static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
90
91static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
92{
93 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
94
95 /*
96 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
97 * acpi_device.
98 */
99 if (!nd_desc->provider_name
100 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
101 return NULL;
102
103 return to_acpi_device(acpi_desc->dev);
104}
105
/*
 * Translate the raw 32-bit firmware status of a bus-scope command into
 * an errno for libnvdimm.  The low 16 bits carry the command status,
 * the upper 16 bits the command-specific extended status.
 */
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			/* out_length >= 40 guarantees 'flags' is present */
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		/* partial clear: report the number of bytes cleared */
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}
186
d6eb270c
DW
187static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
188 u32 status)
189{
190 if (!nvdimm)
191 return xlat_bus_status(buf, cmd, status);
192 if (status)
193 return -EIO;
194 return 0;
195}
196
a7de92da
DW
/*
 * acpi_nfit_ctl - marshal a libnvdimm command into an ACPI _DSM call.
 * @nd_desc: bus descriptor; used when @nvdimm is NULL (bus-scope command)
 * @nvdimm: target dimm, or NULL for bus-scope commands
 * @cmd: ND_CMD_* number; ND_CMD_CALL passes a raw nd_cmd_pkg through
 * @buf: combined input/output payload of @buf_len bytes
 * @cmd_rc: optional out-param for the translated firmware status
 *
 * Returns 0 (or the count of unfilled output bytes) on successful
 * transport, negative errno on transport/validation failure.
 */
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	int rc, i;

	/* For ND_CMD_CALL the DSM function number comes from the package */
	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		/* passthrough packages must match the dimm's DSM family */
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/* both the command and the DSM function must be enabled */
	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	/* wrap the payload buffer in a single-element ACPI package */
	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		/* copy back as much as fits; caller sizes via nd_fw_size */
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	/* union member: package.type aliases the top-level object type */
	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	/* unpack each variable-size output field back into 'buf' */
	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
b94d5230
DW
376
377static const char *spa_type_name(u16 type)
378{
379 static const char *to_name[] = {
380 [NFIT_SPA_VOLATILE] = "volatile",
381 [NFIT_SPA_PM] = "pmem",
382 [NFIT_SPA_DCR] = "dimm-control-region",
383 [NFIT_SPA_BDW] = "block-data-window",
384 [NFIT_SPA_VDISK] = "volatile-disk",
385 [NFIT_SPA_VCD] = "volatile-cd",
386 [NFIT_SPA_PDISK] = "persistent-disk",
387 [NFIT_SPA_PCD] = "persistent-cd",
388
389 };
390
391 if (type > NFIT_SPA_PCD)
392 return "unknown";
393
394 return to_name[type];
395}
396
6839a6d9 397int nfit_spa_type(struct acpi_nfit_system_address *spa)
b94d5230
DW
398{
399 int i;
400
401 for (i = 0; i < NFIT_UUID_MAX; i++)
402 if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
403 return i;
404 return -1;
405}
406
407static bool add_spa(struct acpi_nfit_desc *acpi_desc,
20985164 408 struct nfit_table_prev *prev,
b94d5230
DW
409 struct acpi_nfit_system_address *spa)
410{
411 struct device *dev = acpi_desc->dev;
20985164
VV
412 struct nfit_spa *nfit_spa;
413
31932041
DW
414 if (spa->header.length != sizeof(*spa))
415 return false;
416
20985164 417 list_for_each_entry(nfit_spa, &prev->spas, list) {
31932041 418 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
20985164
VV
419 list_move_tail(&nfit_spa->list, &acpi_desc->spas);
420 return true;
421 }
422 }
b94d5230 423
31932041
DW
424 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
425 GFP_KERNEL);
b94d5230
DW
426 if (!nfit_spa)
427 return false;
428 INIT_LIST_HEAD(&nfit_spa->list);
31932041 429 memcpy(nfit_spa->spa, spa, sizeof(*spa));
b94d5230
DW
430 list_add_tail(&nfit_spa->list, &acpi_desc->spas);
431 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
432 spa->range_index,
433 spa_type_name(nfit_spa_type(spa)));
434 return true;
435}
436
437static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
20985164 438 struct nfit_table_prev *prev,
b94d5230
DW
439 struct acpi_nfit_memory_map *memdev)
440{
441 struct device *dev = acpi_desc->dev;
20985164 442 struct nfit_memdev *nfit_memdev;
b94d5230 443
31932041
DW
444 if (memdev->header.length != sizeof(*memdev))
445 return false;
446
20985164 447 list_for_each_entry(nfit_memdev, &prev->memdevs, list)
31932041 448 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
20985164
VV
449 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
450 return true;
451 }
452
31932041
DW
453 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
454 GFP_KERNEL);
b94d5230
DW
455 if (!nfit_memdev)
456 return false;
457 INIT_LIST_HEAD(&nfit_memdev->list);
31932041 458 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
b94d5230
DW
459 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
460 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
461 __func__, memdev->device_handle, memdev->range_index,
462 memdev->region_index);
463 return true;
464}
465
31932041
DW
466/*
467 * An implementation may provide a truncated control region if no block windows
468 * are defined.
469 */
470static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
471{
472 if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
473 window_size))
474 return 0;
475 if (dcr->windows)
476 return sizeof(*dcr);
477 return offsetof(struct acpi_nfit_control_region, window_size);
478}
479
b94d5230 480static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
20985164 481 struct nfit_table_prev *prev,
b94d5230
DW
482 struct acpi_nfit_control_region *dcr)
483{
484 struct device *dev = acpi_desc->dev;
20985164
VV
485 struct nfit_dcr *nfit_dcr;
486
31932041
DW
487 if (!sizeof_dcr(dcr))
488 return false;
489
20985164 490 list_for_each_entry(nfit_dcr, &prev->dcrs, list)
31932041 491 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
20985164
VV
492 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
493 return true;
494 }
b94d5230 495
31932041
DW
496 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
497 GFP_KERNEL);
b94d5230
DW
498 if (!nfit_dcr)
499 return false;
500 INIT_LIST_HEAD(&nfit_dcr->list);
31932041 501 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
b94d5230
DW
502 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
503 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
504 dcr->region_index, dcr->windows);
505 return true;
506}
507
508static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
20985164 509 struct nfit_table_prev *prev,
b94d5230
DW
510 struct acpi_nfit_data_region *bdw)
511{
512 struct device *dev = acpi_desc->dev;
20985164
VV
513 struct nfit_bdw *nfit_bdw;
514
31932041
DW
515 if (bdw->header.length != sizeof(*bdw))
516 return false;
20985164 517 list_for_each_entry(nfit_bdw, &prev->bdws, list)
31932041 518 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
20985164
VV
519 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
520 return true;
521 }
b94d5230 522
31932041
DW
523 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
524 GFP_KERNEL);
b94d5230
DW
525 if (!nfit_bdw)
526 return false;
527 INIT_LIST_HEAD(&nfit_bdw->list);
31932041 528 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
b94d5230
DW
529 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
530 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
531 bdw->region_index, bdw->windows);
532 return true;
533}
534
31932041
DW
535static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
536{
537 if (idt->header.length < sizeof(*idt))
538 return 0;
539 return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
540}
541
047fc8a1 542static bool add_idt(struct acpi_nfit_desc *acpi_desc,
20985164 543 struct nfit_table_prev *prev,
047fc8a1
RZ
544 struct acpi_nfit_interleave *idt)
545{
546 struct device *dev = acpi_desc->dev;
20985164
VV
547 struct nfit_idt *nfit_idt;
548
31932041
DW
549 if (!sizeof_idt(idt))
550 return false;
551
552 list_for_each_entry(nfit_idt, &prev->idts, list) {
553 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
554 continue;
555
556 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
20985164
VV
557 list_move_tail(&nfit_idt->list, &acpi_desc->idts);
558 return true;
559 }
31932041 560 }
047fc8a1 561
31932041
DW
562 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
563 GFP_KERNEL);
047fc8a1
RZ
564 if (!nfit_idt)
565 return false;
566 INIT_LIST_HEAD(&nfit_idt->list);
31932041 567 memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
047fc8a1
RZ
568 list_add_tail(&nfit_idt->list, &acpi_desc->idts);
569 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
570 idt->interleave_index, idt->line_count);
571 return true;
572}
573
31932041
DW
574static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
575{
576 if (flush->header.length < sizeof(*flush))
577 return 0;
578 return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
579}
580
c2ad2954 581static bool add_flush(struct acpi_nfit_desc *acpi_desc,
20985164 582 struct nfit_table_prev *prev,
c2ad2954
RZ
583 struct acpi_nfit_flush_address *flush)
584{
585 struct device *dev = acpi_desc->dev;
20985164 586 struct nfit_flush *nfit_flush;
c2ad2954 587
31932041
DW
588 if (!sizeof_flush(flush))
589 return false;
590
591 list_for_each_entry(nfit_flush, &prev->flushes, list) {
592 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
593 continue;
594
595 if (memcmp(nfit_flush->flush, flush,
596 sizeof_flush(flush)) == 0) {
20985164
VV
597 list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
598 return true;
599 }
31932041 600 }
20985164 601
31932041
DW
602 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
603 + sizeof_flush(flush), GFP_KERNEL);
c2ad2954
RZ
604 if (!nfit_flush)
605 return false;
606 INIT_LIST_HEAD(&nfit_flush->list);
31932041 607 memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
c2ad2954
RZ
608 list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
609 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
610 flush->device_handle, flush->hint_count);
611 return true;
612}
613
20985164
VV
/*
 * Parse one NFIT sub-table at 'table', dispatching to the matching
 * add_* helper.  Returns the address of the next sub-table, NULL when
 * the end of the NFIT (or a zero-length table) is reached, or
 * ERR_PTR(-ENOMEM) if recording the table failed.
 */
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	/* a zero-length header would loop forever; stop parsing */
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
666
/*
 * Find the SPA-BDW range that pairs with this dimm's control region:
 * a block-data-window SPA referenced by a memdev with the same device
 * handle and control-region index.  BDWs are optional; when none is
 * found, block-window access for the dimm is disabled.
 */
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}
699
/*
 * Wire up the optional block-data-window resources for a dimm: the BDW
 * table matching its control region, the SPA-BDW range, the memdev that
 * maps that range, and any interleave table.  Each lookup bails out
 * silently when the corresponding table is absent (BDWs are optional).
 */
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}
740
/*
 * For every memdev that maps this SPA-DCR or SPA-PM range, find (or
 * allocate) the nfit_mem object for the dimm it belongs to and attach
 * the dimm's control region, flush hints, interleave and block-window
 * resources.  Returns 0, -ENOMEM, or -ENODEV when a referenced control
 * region is missing from the NFIT.
 */
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	/* only DCR and PM ranges associate dimms */
	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		/* one nfit_mem per device handle, shared across ranges */
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		/* materialize the dimm's flush hints as resources */
		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				/* each write-pursuit-queue hint is 8 bytes */
				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}
859
860static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
861{
862 struct nfit_mem *a = container_of(_a, typeof(*a), list);
863 struct nfit_mem *b = container_of(_b, typeof(*b), list);
864 u32 handleA, handleB;
865
866 handleA = __to_nfit_memdev(a)->device_handle;
867 handleB = __to_nfit_memdev(b)->device_handle;
868 if (handleA < handleB)
869 return -1;
870 else if (handleA > handleB)
871 return 1;
872 return 0;
873}
874
875static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
876{
877 struct nfit_spa *nfit_spa;
878
879 /*
880 * For each SPA-DCR or SPA-PMEM address range find its
881 * corresponding MEMDEV(s). From each MEMDEV find the
882 * corresponding DCR. Then, if we're operating on a SPA-DCR,
883 * try to find a SPA-BDW and a corresponding BDW that references
884 * the DCR. Throw it all into an nfit_mem object. Note, that
885 * BDWs are optional.
886 */
887 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
888 int rc;
889
890 rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
891 if (rc)
892 return rc;
893 }
894
895 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
896
897 return 0;
898}
899
45def22c
DW
900static ssize_t revision_show(struct device *dev,
901 struct device_attribute *attr, char *buf)
902{
903 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
904 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
905 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
906
6b577c9d 907 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
45def22c
DW
908}
909static DEVICE_ATTR_RO(revision);
910
9ffd6350
VV
911static ssize_t hw_error_scrub_show(struct device *dev,
912 struct device_attribute *attr, char *buf)
913{
914 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
915 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
916 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
917
918 return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
919}
920
921/*
922 * The 'hw_error_scrub' attribute can have the following values written to it:
923 * '0': Switch to the default mode where an exception will only insert
924 * the address of the memory error into the poison and badblocks lists.
925 * '1': Enable a full scrub to happen if an exception for a memory error is
926 * received.
927 */
928static ssize_t hw_error_scrub_store(struct device *dev,
929 struct device_attribute *attr, const char *buf, size_t size)
930{
931 struct nvdimm_bus_descriptor *nd_desc;
932 ssize_t rc;
933 long val;
934
935 rc = kstrtol(buf, 0, &val);
936 if (rc)
937 return rc;
938
939 device_lock(dev);
940 nd_desc = dev_get_drvdata(dev);
941 if (nd_desc) {
942 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
943
944 switch (val) {
945 case HW_ERROR_SCRUB_ON:
946 acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
947 break;
948 case HW_ERROR_SCRUB_OFF:
949 acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
950 break;
951 default:
952 rc = -EINVAL;
953 break;
954 }
955 }
956 device_unlock(dev);
957 if (rc)
958 return rc;
959 return size;
960}
961static DEVICE_ATTR_RW(hw_error_scrub);
962
37b137ff
VV
963/*
964 * This shows the number of full Address Range Scrubs that have been
965 * completed since driver load time. Userspace can wait on this using
966 * select/poll etc. A '+' at the end indicates an ARS is in progress
967 */
968static ssize_t scrub_show(struct device *dev,
969 struct device_attribute *attr, char *buf)
970{
971 struct nvdimm_bus_descriptor *nd_desc;
972 ssize_t rc = -ENXIO;
973
974 device_lock(dev);
975 nd_desc = dev_get_drvdata(dev);
976 if (nd_desc) {
977 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
978
979 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
980 (work_busy(&acpi_desc->work)) ? "+\n" : "\n");
981 }
982 device_unlock(dev);
983 return rc;
984}
985
37b137ff
VV
986static ssize_t scrub_store(struct device *dev,
987 struct device_attribute *attr, const char *buf, size_t size)
988{
989 struct nvdimm_bus_descriptor *nd_desc;
990 ssize_t rc;
991 long val;
992
993 rc = kstrtol(buf, 0, &val);
994 if (rc)
995 return rc;
996 if (val != 1)
997 return -EINVAL;
998
999 device_lock(dev);
1000 nd_desc = dev_get_drvdata(dev);
1001 if (nd_desc) {
1002 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1003
1004 rc = acpi_nfit_ars_rescan(acpi_desc);
1005 }
1006 device_unlock(dev);
1007 if (rc)
1008 return rc;
1009 return size;
1010}
1011static DEVICE_ATTR_RW(scrub);
1012
1013static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
1014{
1015 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1016 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
1017 | 1 << ND_CMD_ARS_STATUS;
1018
1019 return (nd_desc->cmd_mask & mask) == mask;
1020}
1021
1022static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
1023{
1024 struct device *dev = container_of(kobj, struct device, kobj);
1025 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1026
1027 if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
1028 return 0;
1029 return a->mode;
1030}
1031
45def22c
DW
/* bus-level "nfit" sysfs group: revision, scrub, hw_error_scrub */
static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible, /* hides "scrub" when ARS unsupported */
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
1050
e6dfb2de
DW
/* resolve a DIMM device to its defining NFIT memory-map entry */
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(to_nvdimm(dev));

	return __to_nfit_memdev(nfit_mem);
}
1058
1059static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
1060{
1061 struct nvdimm *nvdimm = to_nvdimm(dev);
1062 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1063
1064 return nfit_mem->dcr;
1065}
1066
/* NFIT device handle of this DIMM */
static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

/* platform physical id (e.g. SMBIOS handle) of this DIMM */
static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

/* DCR identification fields are stored big-endian, hence be16_to_cpu */
static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);
1139
8cc6ddfc
DW
1140static int num_nvdimm_formats(struct nvdimm *nvdimm)
1141{
1142 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1143 int formats = 0;
1144
1145 if (nfit_mem->memdev_pmem)
1146 formats++;
1147 if (nfit_mem->memdev_bdw)
1148 formats++;
1149 return formats;
1150}
1151
e6dfb2de
DW
/* primary interface format code; stored little-endian in the DCR */
static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);
1160
8cc6ddfc
DW
1161static ssize_t format1_show(struct device *dev,
1162 struct device_attribute *attr, char *buf)
1163{
1164 u32 handle;
1165 ssize_t rc = -ENXIO;
1166 struct nfit_mem *nfit_mem;
1167 struct nfit_memdev *nfit_memdev;
1168 struct acpi_nfit_desc *acpi_desc;
1169 struct nvdimm *nvdimm = to_nvdimm(dev);
1170 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1171
1172 nfit_mem = nvdimm_provider_data(nvdimm);
1173 acpi_desc = nfit_mem->acpi_desc;
1174 handle = to_nfit_memdev(dev)->device_handle;
1175
1176 /* assumes DIMMs have at most 2 published interface codes */
1177 mutex_lock(&acpi_desc->init_mutex);
1178 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1179 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
1180 struct nfit_dcr *nfit_dcr;
1181
1182 if (memdev->device_handle != handle)
1183 continue;
1184
1185 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1186 if (nfit_dcr->dcr->region_index != memdev->region_index)
1187 continue;
1188 if (nfit_dcr->dcr->code == dcr->code)
1189 continue;
1bcbf42d
DW
1190 rc = sprintf(buf, "0x%04x\n",
1191 le16_to_cpu(nfit_dcr->dcr->code));
8cc6ddfc
DW
1192 break;
1193 }
1194 if (rc != ENXIO)
1195 break;
1196 }
1197 mutex_unlock(&acpi_desc->init_mutex);
1198 return rc;
1199}
1200static DEVICE_ATTR_RO(format1);
1201
1202static ssize_t formats_show(struct device *dev,
1203 struct device_attribute *attr, char *buf)
1204{
1205 struct nvdimm *nvdimm = to_nvdimm(dev);
1206
1207 return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1208}
1209static DEVICE_ATTR_RO(formats);
1210
e6dfb2de
DW
1211static ssize_t serial_show(struct device *dev,
1212 struct device_attribute *attr, char *buf)
1213{
1214 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1215
5ad9a7fd 1216 return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
e6dfb2de
DW
1217}
1218static DEVICE_ATTR_RO(serial);
1219
a94e3fbe
DW
1220static ssize_t family_show(struct device *dev,
1221 struct device_attribute *attr, char *buf)
1222{
1223 struct nvdimm *nvdimm = to_nvdimm(dev);
1224 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1225
1226 if (nfit_mem->family < 0)
1227 return -ENXIO;
1228 return sprintf(buf, "%d\n", nfit_mem->family);
1229}
1230static DEVICE_ATTR_RO(family);
1231
1232static ssize_t dsm_mask_show(struct device *dev,
1233 struct device_attribute *attr, char *buf)
1234{
1235 struct nvdimm *nvdimm = to_nvdimm(dev);
1236 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1237
1238 if (nfit_mem->family < 0)
1239 return -ENXIO;
1240 return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1241}
1242static DEVICE_ATTR_RO(dsm_mask);
1243
58138820
DW
/* decode the memory-device state flags into space-separated tokens */
static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);
1257
38a879ba
TK
/*
 * Unique DIMM id: includes manufacturing location/date when the DCR
 * marks those fields valid, otherwise just vendor + serial.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);
1275
e6dfb2de
DW
/* per-DIMM "nfit" sysfs group members */
static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};
1295
1296static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1297 struct attribute *a, int n)
1298{
1299 struct device *dev = container_of(kobj, struct device, kobj);
8cc6ddfc 1300 struct nvdimm *nvdimm = to_nvdimm(dev);
e6dfb2de 1301
8cc6ddfc
DW
1302 if (!to_nfit_dcr(dev))
1303 return 0;
1304 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
e6dfb2de 1305 return 0;
8cc6ddfc 1306 return a->mode;
e6dfb2de
DW
1307}
1308
static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

/* groups passed to nvdimm_create() for each registered DIMM */
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};
1321
1322static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
1323 u32 device_handle)
1324{
1325 struct nfit_mem *nfit_mem;
1326
1327 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1328 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
1329 return nfit_mem->nvdimm;
1330
1331 return NULL;
1332}
1333
/*
 * Handle an ACPI notification on a DIMM device: only health events are
 * recognized, and they are forwarded to userspace by poking the cached
 * sysfs "flags" dirent.  Caller must hold the parent device lock.
 */
void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	/* drvdata cleared on unbind => driver data is no longer trustworthy */
	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
ba9c8dd3
DW
1361
/* ACPI notify callback: take the parent lock and forward the event */
static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	/* parent lock serializes against driver unbind (see above) */
	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}
1371
62232e45
DW
/*
 * Probe one DIMM's ACPI companion device: register for notifications,
 * detect which _DSM command family it implements, and record the set of
 * supported functions in nfit_mem->dsm_mask.  DSMs are optional, so most
 * failure modes still return 0 and let the driver continue.
 */
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
		ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			/* prefer the user-requested default family if present */
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	/* record which functions of the chosen family actually respond */
	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}
1444
ba9c8dd3
DW
/* devm teardown action: drop sysfs refs and unhook DIMM notifications */
static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm)
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
	}
	mutex_unlock(&acpi_desc->init_mutex);
}
1468
e6dfb2de
DW
/*
 * Register an nvdimm for every NFIT DIMM entry, translate memdev state
 * flags into nvdimm flags, then arm health-event notification once all
 * DIMMs are registered.
 */
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			/* already registered on a previous pass */
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		/* surface any failure flags in the kernel log */
		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}
1557
62232e45
DW
/* probe the bus-scope _DSM functions (ARS cap/start/status/clear-error) */
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	/* start from any commands the platform/test harness forces on */
	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}
1574
1f7df6f8
DW
1575static ssize_t range_index_show(struct device *dev,
1576 struct device_attribute *attr, char *buf)
1577{
1578 struct nd_region *nd_region = to_nd_region(dev);
1579 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
1580
1581 return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
1582}
1583static DEVICE_ATTR_RO(range_index);
1584
/* per-region "nfit" sysfs group */
static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};
1603
eaf96153
DW
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];	/* GNU zero-length array: sizeof(struct) == 0 */
};

/* byte size of an nfit_set_info holding num_mappings entries */
static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}
1618
/*
 * Legacy sort order: byte-wise memcmp of the raw region_offset, kept so
 * that the "altcookie" matches namespaces created with the old ordering.
 */
static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}
1627
86ef58a4
DW
1628static int cmp_map(const void *m0, const void *m1)
1629{
1630 const struct nfit_set_info_map *map0 = m0;
1631 const struct nfit_set_info_map *map1 = m1;
1632
b03b99a3
DW
1633 if (map0->region_offset < map1->region_offset)
1634 return -1;
1635 else if (map0->region_offset > map1->region_offset)
1636 return 1;
1637 return 0;
86ef58a4
DW
1638}
1639
eaf96153
DW
/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}
1652
/*
 * Compute the interleave-set cookie for a PM/volatile region: collect
 * (region_offset, serial_number) per mapping, sort, and fletcher64 the
 * result.  An "altcookie" is also computed with the legacy sort order
 * so namespaces created with the old ordering still validate.
 */
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* support namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
1706
047fc8a1
RZ
/*
 * Translate a linear aperture offset into the interleaved physical
 * offset using the interleave description table (IDT) line map.
 */
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
1721
/* read the per-lane block-window status register, masked to valid bits */
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}
1733
/*
 * Program the block-window control register for a transfer: pack the
 * cache-line-granular DPA, length and read/write direction into the
 * command word, write it, then flush (and optionally read back to latch).
 */
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	/* some DIMMs require a read-back to latch the control write */
	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
1762
/*
 * Perform one block-window transfer on a single lane: program the
 * control register, copy data through the (possibly interleaved)
 * aperture line by line, flush on writes, then check lane status.
 */
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			/* clamp the copy so it never crosses a line */
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			/*
			 * NOTE(review): base_offset already includes
			 * ->bdw_offset, so it appears to be added twice on
			 * this path -- confirm against the aperture layout.
			 */
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			/* invalidate stale aperture contents before reading */
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
1813
/*
 * Block-region I/O entry point: acquire a lane, then split the request
 * into aperture-sized (mmio->size) chunks handled by single_io.
 */
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
1839
047fc8a1
RZ
1840static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1841 struct acpi_nfit_interleave *idt, u16 interleave_ways)
1842{
1843 if (idt) {
1844 mmio->num_lines = idt->line_count;
1845 mmio->line_size = idt->line_size;
1846 if (interleave_ways == 0)
1847 return -ENXIO;
1848 mmio->table_size = mmio->num_lines * interleave_ways
1849 * mmio->line_size;
1850 }
1851
1852 return 0;
1853}
1854
f0f2c072
RZ
1855static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
1856 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
1857{
1858 struct nd_cmd_dimm_flags flags;
1859 int rc;
1860
1861 memset(&flags, 0, sizeof(flags));
1862 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
aef25338 1863 sizeof(flags), NULL);
f0f2c072
RZ
1864
1865 if (rc >= 0 && flags.status == 0)
1866 nfit_blk->dimm_flags = flags.flags;
1867 else if (rc == -ENOTTY) {
1868 /* fall back to a conservative default */
aef25338 1869 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
f0f2c072
RZ
1870 rc = 0;
1871 } else
1872 rc = -ENXIO;
1873
1874 return rc;
1875}
1876
047fc8a1
RZ
1877static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1878 struct device *dev)
1879{
1880 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
047fc8a1
RZ
1881 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1882 struct nfit_blk_mmio *mmio;
1883 struct nfit_blk *nfit_blk;
1884 struct nfit_mem *nfit_mem;
1885 struct nvdimm *nvdimm;
1886 int rc;
1887
1888 nvdimm = nd_blk_region_to_dimm(ndbr);
1889 nfit_mem = nvdimm_provider_data(nvdimm);
1890 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
1891 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
1892 nfit_mem ? "" : " nfit_mem",
193ccca4
DW
1893 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
1894 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
047fc8a1
RZ
1895 return -ENXIO;
1896 }
1897
1898 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
1899 if (!nfit_blk)
1900 return -ENOMEM;
1901 nd_blk_region_set_provider_data(ndbr, nfit_blk);
1902 nfit_blk->nd_region = to_nd_region(dev);
1903
1904 /* map block aperture memory */
1905 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1906 mmio = &nfit_blk->mmio[BDW];
29b9aa0a
DW
1907 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
1908 nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
67a3e8fe 1909 if (!mmio->addr.base) {
047fc8a1
RZ
1910 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1911 nvdimm_name(nvdimm));
1912 return -ENOMEM;
1913 }
1914 mmio->size = nfit_mem->bdw->size;
1915 mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
1916 mmio->idt = nfit_mem->idt_bdw;
1917 mmio->spa = nfit_mem->spa_bdw;
1918 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
1919 nfit_mem->memdev_bdw->interleave_ways);
1920 if (rc) {
1921 dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
1922 __func__, nvdimm_name(nvdimm));
1923 return rc;
1924 }
1925
1926 /* map block control memory */
1927 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1928 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1929 mmio = &nfit_blk->mmio[DCR];
29b9aa0a
DW
1930 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
1931 nfit_mem->spa_dcr->length);
67a3e8fe 1932 if (!mmio->addr.base) {
047fc8a1
RZ
1933 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1934 nvdimm_name(nvdimm));
1935 return -ENOMEM;
1936 }
1937 mmio->size = nfit_mem->dcr->window_size;
1938 mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
1939 mmio->idt = nfit_mem->idt_dcr;
1940 mmio->spa = nfit_mem->spa_dcr;
1941 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
1942 nfit_mem->memdev_dcr->interleave_ways);
1943 if (rc) {
1944 dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
1945 __func__, nvdimm_name(nvdimm));
1946 return rc;
1947 }
1948
f0f2c072
RZ
1949 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
1950 if (rc < 0) {
1951 dev_dbg(dev, "%s: %s failed get DIMM flags\n",
1952 __func__, nvdimm_name(nvdimm));
1953 return rc;
1954 }
1955
f284a4f2 1956 if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
c2ad2954
RZ
1957 dev_warn(dev, "unable to guarantee persistence of writes\n");
1958
047fc8a1
RZ
1959 if (mmio->line_size == 0)
1960 return 0;
1961
1962 if ((u32) nfit_blk->cmd_offset % mmio->line_size
1963 + 8 > mmio->line_size) {
1964 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
1965 return -ENXIO;
1966 } else if ((u32) nfit_blk->stat_offset % mmio->line_size
1967 + 8 > mmio->line_size) {
1968 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
1969 return -ENXIO;
1970 }
1971
1972 return 0;
1973}
1974
aef25338 1975static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1cf03c00 1976 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
0caeef63 1977{
aef25338 1978 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1cf03c00 1979 struct acpi_nfit_system_address *spa = nfit_spa->spa;
aef25338
DW
1980 int cmd_rc, rc;
1981
1cf03c00
DW
1982 cmd->address = spa->address;
1983 cmd->length = spa->length;
aef25338
DW
1984 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
1985 sizeof(*cmd), &cmd_rc);
1986 if (rc < 0)
1987 return rc;
1cf03c00 1988 return cmd_rc;
0caeef63
VV
1989}
1990
1cf03c00 1991static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
0caeef63
VV
1992{
1993 int rc;
1cf03c00
DW
1994 int cmd_rc;
1995 struct nd_cmd_ars_start ars_start;
1996 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1997 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
0caeef63 1998
1cf03c00
DW
1999 memset(&ars_start, 0, sizeof(ars_start));
2000 ars_start.address = spa->address;
2001 ars_start.length = spa->length;
2002 if (nfit_spa_type(spa) == NFIT_SPA_PM)
2003 ars_start.type = ND_ARS_PERSISTENT;
2004 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2005 ars_start.type = ND_ARS_VOLATILE;
2006 else
2007 return -ENOTTY;
aef25338 2008
1cf03c00
DW
2009 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2010 sizeof(ars_start), &cmd_rc);
aef25338 2011
1cf03c00
DW
2012 if (rc < 0)
2013 return rc;
2014 return cmd_rc;
0caeef63
VV
2015}
2016
1cf03c00 2017static int ars_continue(struct acpi_nfit_desc *acpi_desc)
0caeef63 2018{
aef25338 2019 int rc, cmd_rc;
1cf03c00
DW
2020 struct nd_cmd_ars_start ars_start;
2021 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2022 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2023
2024 memset(&ars_start, 0, sizeof(ars_start));
2025 ars_start.address = ars_status->restart_address;
2026 ars_start.length = ars_status->restart_length;
2027 ars_start.type = ars_status->type;
2028 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2029 sizeof(ars_start), &cmd_rc);
2030 if (rc < 0)
2031 return rc;
2032 return cmd_rc;
2033}
0caeef63 2034
1cf03c00
DW
2035static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2036{
2037 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2038 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2039 int rc, cmd_rc;
aef25338 2040
1cf03c00
DW
2041 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2042 acpi_desc->ars_status_size, &cmd_rc);
2043 if (rc < 0)
2044 return rc;
2045 return cmd_rc;
0caeef63
VV
2046}
2047
82aa37cf 2048static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
1cf03c00 2049 struct nd_cmd_ars_status *ars_status)
0caeef63 2050{
82aa37cf 2051 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
0caeef63
VV
2052 int rc;
2053 u32 i;
2054
82aa37cf
DW
2055 /*
2056 * First record starts at 44 byte offset from the start of the
2057 * payload.
2058 */
2059 if (ars_status->out_length < 44)
2060 return 0;
0caeef63 2061 for (i = 0; i < ars_status->num_records; i++) {
82aa37cf
DW
2062 /* only process full records */
2063 if (ars_status->out_length
2064 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2065 break;
0caeef63
VV
2066 rc = nvdimm_bus_add_poison(nvdimm_bus,
2067 ars_status->records[i].err_address,
2068 ars_status->records[i].length);
2069 if (rc)
2070 return rc;
2071 }
82aa37cf
DW
2072 if (i < ars_status->num_records)
2073 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
0caeef63
VV
2074
2075 return 0;
2076}
2077
/*
 * devm action callback: undo the insert_resource() performed by
 * acpi_nfit_insert_resource() when the driver is torn down.
 */
static void acpi_nfit_remove_resource(void *data)
{
	remove_resource((struct resource *) data);
}
2084
2085static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2086 struct nd_region_desc *ndr_desc)
2087{
2088 struct resource *res, *nd_res = ndr_desc->res;
2089 int is_pmem, ret;
2090
2091 /* No operation if the region is already registered as PMEM */
2092 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2093 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2094 if (is_pmem == REGION_INTERSECTS)
2095 return 0;
2096
2097 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2098 if (!res)
2099 return -ENOMEM;
2100
2101 res->name = "Persistent Memory";
2102 res->start = nd_res->start;
2103 res->end = nd_res->end;
2104 res->flags = IORESOURCE_MEM;
2105 res->desc = IORES_DESC_PERSISTENT_MEMORY;
2106
2107 ret = insert_resource(&iomem_resource, res);
2108 if (ret)
2109 return ret;
2110
d932dd2c
SV
2111 ret = devm_add_action_or_reset(acpi_desc->dev,
2112 acpi_nfit_remove_resource,
2113 res);
2114 if (ret)
af1996ef 2115 return ret;
af1996ef
TK
2116
2117 return 0;
2118}
2119
/*
 * Fill in one nd_mapping_desc for the dimm referenced by @memdev within
 * @nfit_spa, and - for BLK (DCR) ranges - immediately create the blk
 * region since each dimm is its own region there.
 *
 * Returns 0 on success, -ENODEV if the referenced dimm was never
 * registered, -ENOMEM if blk region creation fails.
 */
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;	/* becomes the mapping count for DCR ranges */

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		/* PM and volatile share the simple memdev-described layout */
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			/* region still created below, just with 0 mappings */
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			mapping->size = nfit_mem->bdw->capacity;
			mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
2171
c2f32acd
LCY
2172static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2173{
2174 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2175 nfit_spa_type(spa) == NFIT_SPA_VCD ||
2176 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2177 nfit_spa_type(spa) == NFIT_SPA_PCD);
2178}
2179
/*
 * Create the libnvdimm region for one SPA range: collect the dimm
 * mappings that reference it, compute the interleave set cookie, then
 * instantiate a pmem, volatile, or blk region as appropriate.
 * Idempotent - returns 0 immediately if the region already exists.
 *
 * NOTE: the mappings[] array is 'static' (function-local, file-scope
 * storage), presumably to keep this large array off the stack; that
 * makes the function non-reentrant - callers appear to serialize via
 * acpi_desc->init_mutex (TODO confirm all call paths hold it).
 */
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	/* range_index 0 is only legitimate for virtual-device ranges */
	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	/* embed the generic region desc inside a blk desc to cover both */
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
				spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	/* gather every memdev that maps part of this SPA range */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		/* advertise the range in /proc/iomem before creating it */
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		/* virtual disk/CD ranges are surfaced as pmem regions */
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
2272
2273static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
2274 u32 max_ars)
2275{
2276 struct device *dev = acpi_desc->dev;
2277 struct nd_cmd_ars_status *ars_status;
2278
2279 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
2280 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
2281 return 0;
2282 }
2283
2284 if (acpi_desc->ars_status)
2285 devm_kfree(dev, acpi_desc->ars_status);
2286 acpi_desc->ars_status = NULL;
2287 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2288 if (!ars_status)
2289 return -ENOMEM;
2290 acpi_desc->ars_status = ars_status;
2291 acpi_desc->ars_status_size = max_ars;
1f7df6f8
DW
2292 return 0;
2293}
2294
/*
 * Retrieve the latest ARS results for @nfit_spa and merge any reported
 * error records into the bus poison list.
 *
 * On first use the range's ARS capabilities are queried and cached in
 * nfit_spa (max_ars, clear_err_unit); -ENOTTY indicates the range's
 * type is not scrubbable.  Other returns: negative errno from the
 * capability/status commands (-EBUSY and -ENOSPC are meaningful to
 * callers), -ENOMEM on allocation failure, 0 on success.
 */
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	/* -ENOSPC still carries usable records; process them below */
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
2331
/*
 * Phase-2 scrub: run a directed ARS over one already-registered region
 * that was flagged ars_required, then notify the region so it can
 * revalidate its badblocks.  No-op for regions that don't need it.
 *
 * Called with acpi_desc->init_mutex held (it is dropped/retaken around
 * the -EBUSY sleep below); the running workqueue item keeps list
 * entries from being freed while the lock is dropped.
 */
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			/* remember the full range the scrub started with */
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
2411
2412static void acpi_nfit_scrub(struct work_struct *work)
1f7df6f8 2413{
1cf03c00
DW
2414 struct device *dev;
2415 u64 init_scrub_length = 0;
1f7df6f8 2416 struct nfit_spa *nfit_spa;
1cf03c00
DW
2417 u64 init_scrub_address = 0;
2418 bool init_ars_done = false;
2419 struct acpi_nfit_desc *acpi_desc;
2420 unsigned int tmo = scrub_timeout;
2421 unsigned int overflow_retry = scrub_overflow_abort;
2422
2423 acpi_desc = container_of(work, typeof(*acpi_desc), work);
2424 dev = acpi_desc->dev;
1f7df6f8 2425
1cf03c00
DW
2426 /*
2427 * We scrub in 2 phases. The first phase waits for any platform
2428 * firmware initiated scrubs to complete and then we go search for the
2429 * affected spa regions to mark them scanned. In the second phase we
2430 * initiate a directed scrub for every range that was not scrubbed in
37b137ff
VV
2431 * phase 1. If we're called for a 'rescan', we harmlessly pass through
2432 * the first phase, but really only care about running phase 2, where
2433 * regions can be notified of new poison.
1cf03c00
DW
2434 */
2435
2436 /* process platform firmware initiated scrubs */
2437 retry:
2438 mutex_lock(&acpi_desc->init_mutex);
1f7df6f8 2439 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1cf03c00
DW
2440 struct nd_cmd_ars_status *ars_status;
2441 struct acpi_nfit_system_address *spa;
2442 u64 ars_start, ars_len;
2443 int rc;
1f7df6f8 2444
1cf03c00
DW
2445 if (acpi_desc->cancel)
2446 break;
2447
2448 if (nfit_spa->nd_region)
2449 continue;
2450
2451 if (init_ars_done) {
2452 /*
2453 * No need to re-query, we're now just
2454 * reconciling all the ranges covered by the
2455 * initial scrub
2456 */
2457 rc = 0;
2458 } else
2459 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2460
2461 if (rc == -ENOTTY) {
2462 /* no ars capability, just register spa and move on */
2463 acpi_nfit_register_region(acpi_desc, nfit_spa);
2464 continue;
2465 }
2466
2467 if (rc == -EBUSY && !tmo) {
2468 /* fallthrough to directed scrub in phase 2 */
2469 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2470 break;
2471 } else if (rc == -EBUSY) {
2472 mutex_unlock(&acpi_desc->init_mutex);
2473 ssleep(1);
2474 tmo--;
2475 goto retry;
2476 }
2477
2478 /* we got some results, but there are more pending... */
2479 if (rc == -ENOSPC && overflow_retry--) {
2480 ars_status = acpi_desc->ars_status;
2481 /*
2482 * Record the original scrub range, so that we
2483 * can recall all the ranges impacted by the
2484 * initial scrub.
2485 */
2486 if (!init_scrub_length) {
2487 init_scrub_length = ars_status->length;
2488 init_scrub_address = ars_status->address;
2489 }
2490 rc = ars_continue(acpi_desc);
2491 if (rc == 0) {
2492 mutex_unlock(&acpi_desc->init_mutex);
2493 goto retry;
2494 }
2495 }
2496
2497 if (rc < 0) {
2498 /*
2499 * Initial scrub failed, we'll give it one more
2500 * try below...
2501 */
2502 break;
2503 }
2504
2505 /* We got some final results, record completed ranges */
2506 ars_status = acpi_desc->ars_status;
2507 if (init_scrub_length) {
2508 ars_start = init_scrub_address;
2509 ars_len = ars_start + init_scrub_length;
2510 } else {
2511 ars_start = ars_status->address;
2512 ars_len = ars_status->length;
2513 }
2514 spa = nfit_spa->spa;
2515
2516 if (!init_ars_done) {
2517 init_ars_done = true;
2518 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
2519 ars_start, ars_len);
2520 }
2521 if (ars_start <= spa->address && ars_start + ars_len
2522 >= spa->address + spa->length)
2523 acpi_nfit_register_region(acpi_desc, nfit_spa);
1f7df6f8 2524 }
1cf03c00
DW
2525
2526 /*
2527 * For all the ranges not covered by an initial scrub we still
2528 * want to see if there are errors, but it's ok to discover them
2529 * asynchronously.
2530 */
2531 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2532 /*
2533 * Flag all the ranges that still need scrubbing, but
2534 * register them now to make data available.
2535 */
37b137ff
VV
2536 if (!nfit_spa->nd_region) {
2537 nfit_spa->ars_required = 1;
1cf03c00 2538 acpi_nfit_register_region(acpi_desc, nfit_spa);
37b137ff 2539 }
1cf03c00
DW
2540 }
2541
2542 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2543 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
37b137ff
VV
2544 acpi_desc->scrub_count++;
2545 if (acpi_desc->scrub_count_state)
2546 sysfs_notify_dirent(acpi_desc->scrub_count_state);
1cf03c00
DW
2547 mutex_unlock(&acpi_desc->init_mutex);
2548}
2549
2550static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2551{
2552 struct nfit_spa *nfit_spa;
2553 int rc;
2554
2555 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2556 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
2557 /* BLK regions don't need to wait for ars results */
2558 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
2559 if (rc)
2560 return rc;
2561 }
2562
2563 queue_work(nfit_wq, &acpi_desc->work);
1f7df6f8
DW
2564 return 0;
2565}
2566
20985164
VV
2567static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
2568 struct nfit_table_prev *prev)
2569{
2570 struct device *dev = acpi_desc->dev;
2571
2572 if (!list_empty(&prev->spas) ||
2573 !list_empty(&prev->memdevs) ||
2574 !list_empty(&prev->dcrs) ||
2575 !list_empty(&prev->bdws) ||
2576 !list_empty(&prev->idts) ||
2577 !list_empty(&prev->flushes)) {
2578 dev_err(dev, "new nfit deletes entries (unsupported)\n");
2579 return -ENXIO;
2580 }
2581 return 0;
2582}
2583
37b137ff
VV
2584static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
2585{
2586 struct device *dev = acpi_desc->dev;
2587 struct kernfs_node *nfit;
2588 struct device *bus_dev;
2589
2590 if (!ars_supported(acpi_desc->nvdimm_bus))
2591 return 0;
2592
2593 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2594 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
2595 if (!nfit) {
2596 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
2597 return -ENODEV;
2598 }
2599 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
2600 sysfs_put(nfit);
2601 if (!acpi_desc->scrub_count_state) {
2602 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
2603 return -ENODEV;
2604 }
2605
2606 return 0;
2607}
2608
/*
 * devm action: tear down everything acpi_nfit_init() set up - cancel
 * outstanding scrub work, release the sysfs scrub dirent, unregister
 * the nvdimm bus, and remove this descriptor from the global list used
 * for MCE dispatch.
 */
static void acpi_nfit_destruct(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	acpi_desc->cancel = 1;
	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	/* wait for any queued/running scrub work to observe ->cancel */
	flush_workqueue(nfit_wq);
	if (acpi_desc->scrub_count_state)
		sysfs_put(acpi_desc->scrub_count_state);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	acpi_desc->nvdimm_bus = NULL;
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);
}
2636
/*
 * Parse an NFIT (or _FIT) byte stream and bring up / update the nvdimm
 * bus, dimms, and regions it describes.  Safe to call repeatedly: the
 * first call registers the bus and teardown action; later calls merge
 * updated tables against the previously parsed state (deletions are
 * rejected by acpi_nfit_check_deletions()).
 *
 * @acpi_desc: descriptor previously prepared by acpi_nfit_desc_init()
 * @data: start of the NFIT sub-tables (lead-in header already skipped)
 * @sz: number of bytes at @data
 *
 * Returns 0 on success or a negative errno.
 */
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	/*
	 * Move the previously parsed tables onto 'prev'; add_table()
	 * reclaims entries that reappear in the new byte stream.
	 */
	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
b94d5230 2719
7ae0fa43
DW
2720struct acpi_nfit_flush_work {
2721 struct work_struct work;
2722 struct completion cmp;
2723};
2724
2725static void flush_probe(struct work_struct *work)
2726{
2727 struct acpi_nfit_flush_work *flush;
2728
2729 flush = container_of(work, typeof(*flush), work);
2730 complete(&flush->cmp);
2731}
2732
2733static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2734{
2735 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2736 struct device *dev = acpi_desc->dev;
2737 struct acpi_nfit_flush_work flush;
e471486c 2738 int rc;
7ae0fa43
DW
2739
2740 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
2741 device_lock(dev);
2742 device_unlock(dev);
2743
2744 /*
2745 * Scrub work could take 10s of seconds, userspace may give up so we
2746 * need to be interruptible while waiting.
2747 */
2748 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2749 COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
2750 queue_work(nfit_wq, &flush.work);
e471486c
DW
2751
2752 rc = wait_for_completion_interruptible(&flush.cmp);
2753 cancel_work_sync(&flush.work);
2754 return rc;
7ae0fa43
DW
2755}
2756
87bf572e
DW
2757static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
2758 struct nvdimm *nvdimm, unsigned int cmd)
2759{
2760 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2761
2762 if (nvdimm)
2763 return 0;
2764 if (cmd != ND_CMD_ARS_START)
2765 return 0;
2766
2767 /*
2768 * The kernel and userspace may race to initiate a scrub, but
2769 * the scrub thread is prepared to lose that initial race. It
2770 * just needs guarantees that any ars it initiates are not
2771 * interrupted by any intervening start reqeusts from userspace.
2772 */
2773 if (work_busy(&acpi_desc->work))
2774 return -EBUSY;
2775
2776 return 0;
2777}
2778
6839a6d9 2779int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
37b137ff
VV
2780{
2781 struct device *dev = acpi_desc->dev;
2782 struct nfit_spa *nfit_spa;
2783
2784 if (work_busy(&acpi_desc->work))
2785 return -EBUSY;
2786
2787 if (acpi_desc->cancel)
2788 return 0;
2789
2790 mutex_lock(&acpi_desc->init_mutex);
2791 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2792 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2793
2794 if (nfit_spa_type(spa) != NFIT_SPA_PM)
2795 continue;
2796
2797 nfit_spa->ars_required = 1;
2798 }
2799 queue_work(nfit_wq, &acpi_desc->work);
2800 dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
2801 mutex_unlock(&acpi_desc->init_mutex);
2802
2803 return 0;
2804}
2805
/*
 * One-time initialization of an acpi_nfit_desc: wire up the nvdimm bus
 * descriptor callbacks and initialize every list head, the init mutex,
 * and the scrub work item.  Must run before acpi_nfit_init().
 */
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
20985164
VV
2833
/*
 * ACPI .add callback for the ACPI0012 root device.  Locates the static
 * NFIT table, prefers a _FIT evaluation when present (it supersedes the
 * static table), and hands the byte stream to acpi_nfit_init().
 * A missing NFIT is not an error - dimms may be hotplugged later.
 */
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				 __func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/*
		 * skip over the lead-in header table
		 *
		 * NOTE(review): buf.pointer is only freed on the
		 * success-with-length path above; if _FIT succeeds with
		 * buf.length == 0 any allocation appears to leak - and
		 * the acpi_get_table() reference is never put.  Confirm
		 * against the ACPICA buffer/table ownership rules.
		 */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));
	return rc;
}
2879
/*
 * ACPI .remove callback.  Intentionally empty: all teardown (scrub
 * cancellation, bus unregistration, list removal) happens in the devm
 * action installed by acpi_nfit_init().
 */
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_destruct */
	return 0;
}
2885
/*
 * Handle an NFIT-update notification: re-evaluate _FIT and merge the
 * new table set into the existing descriptor (allocating one on first
 * notification if the device probed without an NFIT).  Must be called
 * under the device lock - see acpi_nfit_notify().  Errors are logged,
 * not returned.
 */
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	if (event != NFIT_NOTIFY_UPDATE)
		return;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		/* first sighting of an NFIT on this platform */
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
20985164 2936
c14a868a
DW
2937static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2938{
2939 device_lock(&adev->dev);
2940 __acpi_nfit_notify(&adev->dev, adev->handle, event);
2941 device_unlock(&adev->dev);
20985164
VV
2942}
2943
b94d5230
DW
2944static const struct acpi_device_id acpi_nfit_ids[] = {
2945 { "ACPI0012", 0 },
2946 { "", 0 },
2947};
2948MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
2949
/*
 * ACPI bus glue: probe/remove via acpi_nfit_add/acpi_nfit_remove, plus
 * asynchronous notifications (e.g. NFIT updates) via acpi_nfit_notify.
 */
static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
2959
2960static __init int nfit_init(void)
2961{
2962 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
2963 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
2964 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
2965 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
2966 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
2967 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
2968 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
2969
2970 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
2971 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
2972 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
2973 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
2974 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
2975 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
2976 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
2977 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
2978 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
2979 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
31eca76b
DW
2980 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
2981 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
e02fb726 2982 acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
b94d5230 2983
7ae0fa43
DW
2984 nfit_wq = create_singlethread_workqueue("nfit");
2985 if (!nfit_wq)
2986 return -ENOMEM;
2987
6839a6d9
VV
2988 nfit_mce_register();
2989
b94d5230
DW
2990 return acpi_bus_register_driver(&acpi_nfit_driver);
2991}
2992
/* Module teardown: release resources in reverse order of nfit_init(). */
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	/* driver is unregistered, so no new work can be queued */
	destroy_workqueue(nfit_wq);
	/* every descriptor should have been torn down by ->remove() */
	WARN_ON(!list_empty(&acpi_descs));
}
3000
/* standard module entry/exit and metadata */
module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");