| 1 | /* |
| 2 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of version 2 of the GNU General Public License as |
| 6 | * published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope that it will be useful, but |
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 11 | * General Public License for more details. |
| 12 | */ |
| 13 | #include <linux/list_sort.h> |
| 14 | #include <linux/libnvdimm.h> |
| 15 | #include <linux/module.h> |
| 16 | #include <linux/mutex.h> |
| 17 | #include <linux/ndctl.h> |
| 18 | #include <linux/sysfs.h> |
| 19 | #include <linux/delay.h> |
| 20 | #include <linux/list.h> |
| 21 | #include <linux/acpi.h> |
| 22 | #include <linux/sort.h> |
| 23 | #include <linux/io.h> |
| 24 | #include <linux/nd.h> |
| 25 | #include <asm/cacheflush.h> |
| 26 | #include "nfit.h" |
| 27 | |
| 28 | /* |
| 29 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is |
| 30 | * irrelevant. |
| 31 | */ |
| 32 | #include <linux/io-64-nonatomic-hi-lo.h> |
| 33 | |
| 34 | static bool force_enable_dimms; |
| 35 | module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); |
| 36 | MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); |
| 37 | |
| 38 | static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT; |
| 39 | module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR); |
| 40 | MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds"); |
| 41 | |
| 42 | /* after three payloads of overflow, it's dead jim */ |
| 43 | static unsigned int scrub_overflow_abort = 3; |
| 44 | module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR); |
| 45 | MODULE_PARM_DESC(scrub_overflow_abort, |
| 46 | "Number of times we overflow ARS results before abort"); |
| 47 | |
| 48 | static bool disable_vendor_specific; |
| 49 | module_param(disable_vendor_specific, bool, S_IRUGO); |
| 50 | MODULE_PARM_DESC(disable_vendor_specific, |
| 51 | "Limit commands to the publicly specified set"); |
| 52 | |
| 53 | static unsigned long override_dsm_mask; |
| 54 | module_param(override_dsm_mask, ulong, S_IRUGO); |
| 55 | MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions"); |
| 56 | |
| 57 | static int default_dsm_family = -1; |
| 58 | module_param(default_dsm_family, int, S_IRUGO); |
| 59 | MODULE_PARM_DESC(default_dsm_family, |
| 60 | "Try this DSM type first when identifying NVDIMM family"); |
| 61 | |
| 62 | LIST_HEAD(acpi_descs); |
| 63 | DEFINE_MUTEX(acpi_desc_lock); |
| 64 | |
| 65 | static struct workqueue_struct *nfit_wq; |
| 66 | |
| 67 | struct nfit_table_prev { |
| 68 | struct list_head spas; |
| 69 | struct list_head memdevs; |
| 70 | struct list_head dcrs; |
| 71 | struct list_head bdws; |
| 72 | struct list_head idts; |
| 73 | struct list_head flushes; |
| 74 | }; |
| 75 | |
| 76 | static guid_t nfit_uuid[NFIT_UUID_MAX]; |
| 77 | |
| 78 | const guid_t *to_nfit_uuid(enum nfit_uuids id) |
| 79 | { |
| 80 | return &nfit_uuid[id]; |
| 81 | } |
| 82 | EXPORT_SYMBOL(to_nfit_uuid); |
| 83 | |
| 84 | static struct acpi_nfit_desc *to_acpi_nfit_desc( |
| 85 | struct nvdimm_bus_descriptor *nd_desc) |
| 86 | { |
| 87 | return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); |
| 88 | } |
| 89 | |
| 90 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) |
| 91 | { |
| 92 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| 93 | |
| 94 | /* |
| 95 | * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct |
| 96 | * acpi_device. |
| 97 | */ |
| 98 | if (!nd_desc->provider_name |
| 99 | || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) |
| 100 | return NULL; |
| 101 | |
| 102 | return to_acpi_device(acpi_desc->dev); |
| 103 | } |
| 104 | |
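/*
 * Translate the firmware status word returned by bus-scope commands
 * (ARS cap/start/status, clear error) into an errno. The lower 16 bits
 * carry the command status, the upper 16 bits the extended status.
 */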
| 105 | static int xlat_bus_status(void *buf, unsigned int cmd, u32 status) |
| 106 | { |
| 107 | struct nd_cmd_clear_error *clear_err; |
| 108 | struct nd_cmd_ars_status *ars_status; |
| 109 | u16 flags; |
| 110 | |
| 111 | switch (cmd) { |
| 112 | case ND_CMD_ARS_CAP: |
| 113 | if ((status & 0xffff) == NFIT_ARS_CAP_NONE) |
| 114 | return -ENOTTY; |
| 115 | |
| 116 | /* Command failed */ |
| 117 | if (status & 0xffff) |
| 118 | return -EIO; |
| 119 | |
| 120 | /* No supported scan types for this range */ |
| 121 | flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; |
| 122 | if ((status >> 16 & flags) == 0) |
| 123 | return -ENOTTY; |
| 124 | return 0; |
| 125 | case ND_CMD_ARS_START: |
| 126 | /* ARS is in progress */ |
| 127 | if ((status & 0xffff) == NFIT_ARS_START_BUSY) |
| 128 | return -EBUSY; |
| 129 | |
| 130 | /* Command failed */ |
| 131 | if (status & 0xffff) |
| 132 | return -EIO; |
| 133 | return 0; |
| 134 | case ND_CMD_ARS_STATUS: |
| 135 | ars_status = buf; |
| 136 | /* Command failed */ |
| 137 | if (status & 0xffff) |
| 138 | return -EIO; |
| 139 | /* Check extended status (Upper two bytes) */ |
| 140 | if (status == NFIT_ARS_STATUS_DONE) |
| 141 | return 0; |
| 142 | |
| 143 | /* ARS is in progress */ |
| 144 | if (status == NFIT_ARS_STATUS_BUSY) |
| 145 | return -EBUSY; |
| 146 | |
| 147 | /* No ARS performed for the current boot */ |
| 148 | if (status == NFIT_ARS_STATUS_NONE) |
| 149 | return -EAGAIN; |
| 150 | |
| 151 | /* |
| 152 | * ARS interrupted, either we overflowed or some other |
| 153 | * agent wants the scan to stop. If we didn't overflow |
| 154 | * then just continue with the returned results. |
| 155 | */ |
| 156 | if (status == NFIT_ARS_STATUS_INTR) { |
| 157 | if (ars_status->out_length >= 40 && (ars_status->flags |
| 158 | & NFIT_ARS_F_OVERFLOW)) |
| 159 | return -ENOSPC; |
| 160 | return 0; |
| 161 | } |
| 162 | |
| 163 | /* Unknown status */ |
| 164 | if (status >> 16) |
| 165 | return -EIO; |
| 166 | return 0; |
| 167 | case ND_CMD_CLEAR_ERROR: |
| 168 | clear_err = buf; |
| 169 | if (status & 0xffff) |
| 170 | return -EIO; |
| 171 | if (!clear_err->cleared) |
| 172 | return -EIO; |
| 173 | if (clear_err->length > clear_err->cleared) |
| 174 | return clear_err->cleared; |
| 175 | return 0; |
| 176 | default: |
| 177 | break; |
| 178 | } |
| 179 | |
| 180 | /* all other non-zero status results in an error */ |
| 181 | if (status) |
| 182 | return -EIO; |
| 183 | return 0; |
| 184 | } |
| 185 | |
| 186 | #define ACPI_LABELS_LOCKED 3 |
| 187 | |
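/*
 * Translate DIMM-scope command status to an errno. A locked label area
 * is reported as status 3 (ACPI_LABELS_LOCKED) by the _LS{I,R,W}
 * methods, or via ND_CONFIG_LOCKED in the extended status on the _DSM
 * path.
 */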
| 188 | static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, |
| 189 | u32 status) |
| 190 | { |
| 191 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 192 | |
| 193 | switch (cmd) { |
| 194 | case ND_CMD_GET_CONFIG_SIZE: |
| 195 | /* |
| 196 | * In the _LSI, _LSR, _LSW case the locked status is |
| 197 | * communicated via the read/write commands |
| 198 | */ |
| 199 | if (nfit_mem->has_lsi) |
| 200 | break; |
| 201 | |
| 202 | if (status >> 16 & ND_CONFIG_LOCKED) |
| 203 | return -EACCES; |
| 204 | break; |
| 205 | case ND_CMD_GET_CONFIG_DATA: |
| 206 | if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED) |
| 207 | return -EACCES; |
| 208 | break; |
| 209 | case ND_CMD_SET_CONFIG_DATA: |
| 210 | if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED) |
| 211 | return -EACCES; |
| 212 | break; |
| 213 | default: |
| 214 | break; |
| 215 | } |
| 216 | |
| 217 | /* all other non-zero status results in an error */ |
| 218 | if (status) |
| 219 | return -EIO; |
| 220 | return 0; |
| 221 | } |
| 222 | |
| 223 | static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, |
| 224 | u32 status) |
| 225 | { |
| 226 | if (!nvdimm) |
| 227 | return xlat_bus_status(buf, cmd, status); |
| 228 | return xlat_nvdimm_status(nvdimm, buf, cmd, status); |
| 229 | } |
| 230 | |
| 231 | /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */ |
| 232 | static union acpi_object *pkg_to_buf(union acpi_object *pkg) |
| 233 | { |
| 234 | int i; |
| 235 | void *dst; |
| 236 | size_t size = 0; |
| 237 | union acpi_object *buf = NULL; |
| 238 | |
| 239 | if (pkg->type != ACPI_TYPE_PACKAGE) { |
| 240 | WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", |
| 241 | pkg->type); |
| 242 | goto err; |
| 243 | } |
| 244 | |
| 245 | for (i = 0; i < pkg->package.count; i++) { |
| 246 | union acpi_object *obj = &pkg->package.elements[i]; |
| 247 | |
| 248 | if (obj->type == ACPI_TYPE_INTEGER) |
| 249 | size += 4; |
| 250 | else if (obj->type == ACPI_TYPE_BUFFER) |
| 251 | size += obj->buffer.length; |
| 252 | else { |
| 253 | WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", |
| 254 | obj->type); |
| 255 | goto err; |
| 256 | } |
| 257 | } |
| 258 | |
| 259 | buf = ACPI_ALLOCATE(sizeof(*buf) + size); |
| 260 | if (!buf) |
| 261 | goto err; |
| 262 | |
| 263 | dst = buf + 1; |
| 264 | buf->type = ACPI_TYPE_BUFFER; |
| 265 | buf->buffer.length = size; |
| 266 | buf->buffer.pointer = dst; |
| 267 | for (i = 0; i < pkg->package.count; i++) { |
| 268 | union acpi_object *obj = &pkg->package.elements[i]; |
| 269 | |
| 270 | if (obj->type == ACPI_TYPE_INTEGER) { |
| 271 | memcpy(dst, &obj->integer.value, 4); |
| 272 | dst += 4; |
| 273 | } else if (obj->type == ACPI_TYPE_BUFFER) { |
| 274 | memcpy(dst, obj->buffer.pointer, obj->buffer.length); |
| 275 | dst += obj->buffer.length; |
| 276 | } |
| 277 | } |
| 278 | err: |
| 279 | ACPI_FREE(pkg); |
| 280 | return buf; |
| 281 | } |
| 282 | |
| 283 | static union acpi_object *int_to_buf(union acpi_object *integer) |
| 284 | { |
| 285 | union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4); |
| 286 | void *dst = NULL; |
| 287 | |
| 288 | if (!buf) |
| 289 | goto err; |
| 290 | |
| 291 | if (integer->type != ACPI_TYPE_INTEGER) { |
| 292 | WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", |
| 293 | integer->type); |
| 294 | goto err; |
| 295 | } |
| 296 | |
| 297 | dst = buf + 1; |
| 298 | buf->type = ACPI_TYPE_BUFFER; |
| 299 | buf->buffer.length = 4; |
| 300 | buf->buffer.pointer = dst; |
| 301 | memcpy(dst, &integer->integer.value, 4); |
| 302 | err: |
| 303 | ACPI_FREE(integer); |
| 304 | return buf; |
| 305 | } |
| 306 | |
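/*
 * _LSW (Label Storage Write): takes an offset, a length, and a data
 * buffer; returns a single integer status that int_to_buf() repackages
 * into the buffer object acpi_nfit_ctl() expects.
 */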
| 307 | static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset, |
| 308 | u32 len, void *data) |
| 309 | { |
| 310 | acpi_status rc; |
| 311 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 312 | struct acpi_object_list input = { |
| 313 | .count = 3, |
| 314 | .pointer = (union acpi_object []) { |
| 315 | [0] = { |
| 316 | .integer.type = ACPI_TYPE_INTEGER, |
| 317 | .integer.value = offset, |
| 318 | }, |
| 319 | [1] = { |
| 320 | .integer.type = ACPI_TYPE_INTEGER, |
| 321 | .integer.value = len, |
| 322 | }, |
| 323 | [2] = { |
| 324 | .buffer.type = ACPI_TYPE_BUFFER, |
| 325 | .buffer.pointer = data, |
| 326 | .buffer.length = len, |
| 327 | }, |
| 328 | }, |
| 329 | }; |
| 330 | |
| 331 | rc = acpi_evaluate_object(handle, "_LSW", &input, &buf); |
| 332 | if (ACPI_FAILURE(rc)) |
| 333 | return NULL; |
| 334 | return int_to_buf(buf.pointer); |
| 335 | } |
| 336 | |
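/*
 * _LSR (Label Storage Read): takes an offset and a length; returns a
 * package (status plus label data) that pkg_to_buf() flattens into a
 * single buffer object.
 */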
| 337 | static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset, |
| 338 | u32 len) |
| 339 | { |
| 340 | acpi_status rc; |
| 341 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 342 | struct acpi_object_list input = { |
| 343 | .count = 2, |
| 344 | .pointer = (union acpi_object []) { |
| 345 | [0] = { |
| 346 | .integer.type = ACPI_TYPE_INTEGER, |
| 347 | .integer.value = offset, |
| 348 | }, |
| 349 | [1] = { |
| 350 | .integer.type = ACPI_TYPE_INTEGER, |
| 351 | .integer.value = len, |
| 352 | }, |
| 353 | }, |
| 354 | }; |
| 355 | |
| 356 | rc = acpi_evaluate_object(handle, "_LSR", &input, &buf); |
| 357 | if (ACPI_FAILURE(rc)) |
| 358 | return NULL; |
| 359 | return pkg_to_buf(buf.pointer); |
| 360 | } |
| 361 | |
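/*
 * _LSI (Label Storage Information): no arguments; returns a package
 * describing the label storage area (per ACPI 6.2: status, size, and
 * max transfer size).
 */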
| 362 | static union acpi_object *acpi_label_info(acpi_handle handle) |
| 363 | { |
| 364 | acpi_status rc; |
| 365 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 366 | |
| 367 | rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf); |
| 368 | if (ACPI_FAILURE(rc)) |
| 369 | return NULL; |
| 370 | return pkg_to_buf(buf.pointer); |
| 371 | } |
| 372 | |
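/*
 * Pick the _DSM revision id for a family/function pair: functions
 * listed in the table below must be invoked with revision 2,
 * everything else defaults to revision 1.
 */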
| 373 | static u8 nfit_dsm_revid(unsigned family, unsigned func) |
| 374 | { |
| 375 | static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = { |
| 376 | [NVDIMM_FAMILY_INTEL] = { |
| 377 | [NVDIMM_INTEL_GET_MODES] = 2, |
| 378 | [NVDIMM_INTEL_GET_FWINFO] = 2, |
| 379 | [NVDIMM_INTEL_START_FWUPDATE] = 2, |
| 380 | [NVDIMM_INTEL_SEND_FWUPDATE] = 2, |
| 381 | [NVDIMM_INTEL_FINISH_FWUPDATE] = 2, |
| 382 | [NVDIMM_INTEL_QUERY_FWUPDATE] = 2, |
| 383 | [NVDIMM_INTEL_SET_THRESHOLD] = 2, |
| 384 | [NVDIMM_INTEL_INJECT_ERROR] = 2, |
| 385 | }, |
| 386 | }; |
| 387 | u8 id; |
| 388 | |
| 389 | if (family > NVDIMM_FAMILY_MAX) |
| 390 | return 0; |
| 391 | if (func > 31) |
| 392 | return 0; |
| 393 | id = revid_table[family][func]; |
| 394 | if (id == 0) |
| 395 | return 1; /* default */ |
| 396 | return id; |
| 397 | } |
| 398 | |
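/*
 * Entry point for both bus-scope (nvdimm == NULL, e.g. ARS) and
 * DIMM-scope commands. The ND_CMD_* request in 'buf' is marshalled
 * into a _DSM (or _LSI/_LSR/_LSW) evaluation, and the output object
 * is unpacked field-by-field back into 'buf'.
 */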
| 399 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, |
| 400 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) |
| 401 | { |
| 402 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); |
| 403 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 404 | union acpi_object in_obj, in_buf, *out_obj; |
| 405 | const struct nd_cmd_desc *desc = NULL; |
| 406 | struct device *dev = acpi_desc->dev; |
| 407 | struct nd_cmd_pkg *call_pkg = NULL; |
| 408 | const char *cmd_name, *dimm_name; |
| 409 | unsigned long cmd_mask, dsm_mask; |
| 410 | u32 offset, fw_status = 0; |
| 411 | acpi_handle handle; |
| 412 | unsigned int func; |
| 413 | const guid_t *guid; |
| 414 | int rc, i; |
| 415 | |
| 416 | func = cmd; |
| 417 | if (cmd == ND_CMD_CALL) { |
| 418 | call_pkg = buf; |
| 419 | func = call_pkg->nd_command; |
| 420 | |
| 421 | for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) |
| 422 | if (call_pkg->nd_reserved2[i]) |
| 423 | return -EINVAL; |
| 424 | } |
| 425 | |
| 426 | if (nvdimm) { |
| 427 | struct acpi_device *adev = nfit_mem->adev; |
| 428 | |
| 429 | if (!adev) |
| 430 | return -ENOTTY; |
| 431 | if (call_pkg && nfit_mem->family != call_pkg->nd_family) |
| 432 | return -ENOTTY; |
| 433 | |
| 434 | dimm_name = nvdimm_name(nvdimm); |
| 435 | cmd_name = nvdimm_cmd_name(cmd); |
| 436 | cmd_mask = nvdimm_cmd_mask(nvdimm); |
| 437 | dsm_mask = nfit_mem->dsm_mask; |
| 438 | desc = nd_cmd_dimm_desc(cmd); |
| 439 | guid = to_nfit_uuid(nfit_mem->family); |
| 440 | handle = adev->handle; |
| 441 | } else { |
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		if (!adev)
			return -ENOTTY;
| 444 | cmd_name = nvdimm_bus_cmd_name(cmd); |
| 445 | cmd_mask = nd_desc->cmd_mask; |
| 446 | dsm_mask = cmd_mask; |
| 447 | if (cmd == ND_CMD_CALL) |
| 448 | dsm_mask = nd_desc->bus_dsm_mask; |
| 449 | desc = nd_cmd_bus_desc(cmd); |
| 450 | guid = to_nfit_uuid(NFIT_DEV_BUS); |
| 451 | handle = adev->handle; |
| 452 | dimm_name = "bus"; |
| 453 | } |
| 454 | |
| 455 | if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) |
| 456 | return -ENOTTY; |
| 457 | |
| 458 | if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) |
| 459 | return -ENOTTY; |
| 460 | |
| 461 | in_obj.type = ACPI_TYPE_PACKAGE; |
| 462 | in_obj.package.count = 1; |
| 463 | in_obj.package.elements = &in_buf; |
| 464 | in_buf.type = ACPI_TYPE_BUFFER; |
| 465 | in_buf.buffer.pointer = buf; |
| 466 | in_buf.buffer.length = 0; |
| 467 | |
| 468 | /* libnvdimm has already validated the input envelope */ |
| 469 | for (i = 0; i < desc->in_num; i++) |
| 470 | in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, |
| 471 | i, buf); |
| 472 | |
| 473 | if (call_pkg) { |
| 474 | /* skip over package wrapper */ |
| 475 | in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; |
| 476 | in_buf.buffer.length = call_pkg->nd_size_in; |
| 477 | } |
| 478 | |
| 479 | dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", |
| 480 | __func__, dimm_name, cmd, func, in_buf.buffer.length); |
| 481 | print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, |
| 482 | in_buf.buffer.pointer, |
| 483 | min_t(u32, 256, in_buf.buffer.length), true); |
| 484 | |
| 485 | /* call the BIOS, prefer the named methods over _DSM if available */ |
| 486 | if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi) |
| 487 | out_obj = acpi_label_info(handle); |
| 488 | else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) { |
| 489 | struct nd_cmd_get_config_data_hdr *p = buf; |
| 490 | |
| 491 | out_obj = acpi_label_read(handle, p->in_offset, p->in_length); |
| 492 | } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA |
| 493 | && nfit_mem->has_lsw) { |
| 494 | struct nd_cmd_set_config_hdr *p = buf; |
| 495 | |
| 496 | out_obj = acpi_label_write(handle, p->in_offset, p->in_length, |
| 497 | p->in_buf); |
| 498 | } else { |
| 499 | u8 revid; |
| 500 | |
| 501 | if (nvdimm) |
| 502 | revid = nfit_dsm_revid(nfit_mem->family, func); |
| 503 | else |
| 504 | revid = 1; |
| 505 | out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); |
| 506 | } |
| 507 | |
| 508 | if (!out_obj) { |
| 509 | dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, |
| 510 | cmd_name); |
| 511 | return -EINVAL; |
| 512 | } |
| 513 | |
| 514 | if (call_pkg) { |
| 515 | call_pkg->nd_fw_size = out_obj->buffer.length; |
| 516 | memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, |
| 517 | out_obj->buffer.pointer, |
| 518 | min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); |
| 519 | |
| 520 | ACPI_FREE(out_obj); |
| 521 | /* |
| 522 | * Need to support FW function w/o known size in advance. |
| 523 | * Caller can determine required size based upon nd_fw_size. |
| 524 | * If we return an error (like elsewhere) then caller wouldn't |
| 525 | * be able to rely upon data returned to make calculation. |
| 526 | */ |
| 527 | return 0; |
| 528 | } |
| 529 | |
	if (out_obj->type != ACPI_TYPE_BUFFER) {
| 531 | dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", |
| 532 | __func__, dimm_name, cmd_name, out_obj->type); |
| 533 | rc = -EINVAL; |
| 534 | goto out; |
| 535 | } |
| 536 | |
| 537 | dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name, |
| 538 | cmd_name, out_obj->buffer.length); |
| 539 | print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, |
| 540 | out_obj->buffer.pointer, |
| 541 | min_t(u32, 128, out_obj->buffer.length), true); |
| 542 | |
| 543 | for (i = 0, offset = 0; i < desc->out_num; i++) { |
| 544 | u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, |
| 545 | (u32 *) out_obj->buffer.pointer, |
| 546 | out_obj->buffer.length - offset); |
| 547 | |
| 548 | if (offset + out_size > out_obj->buffer.length) { |
| 549 | dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", |
| 550 | __func__, dimm_name, cmd_name, i); |
| 551 | break; |
| 552 | } |
| 553 | |
| 554 | if (in_buf.buffer.length + offset + out_size > buf_len) { |
| 555 | dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", |
| 556 | __func__, dimm_name, cmd_name, i); |
| 557 | rc = -ENXIO; |
| 558 | goto out; |
| 559 | } |
| 560 | memcpy(buf + in_buf.buffer.length + offset, |
| 561 | out_obj->buffer.pointer + offset, out_size); |
| 562 | offset += out_size; |
| 563 | } |
| 564 | |
| 565 | /* |
| 566 | * Set fw_status for all the commands with a known format to be |
| 567 | * later interpreted by xlat_status(). |
| 568 | */ |
| 569 | if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP |
| 570 | && cmd <= ND_CMD_CLEAR_ERROR) |
| 571 | || (nvdimm && cmd >= ND_CMD_SMART |
| 572 | && cmd <= ND_CMD_VENDOR))) |
| 573 | fw_status = *(u32 *) out_obj->buffer.pointer; |
| 574 | |
| 575 | if (offset + in_buf.buffer.length < buf_len) { |
| 576 | if (i >= 1) { |
| 577 | /* |
| 578 | * status valid, return the number of bytes left |
| 579 | * unfilled in the output buffer |
| 580 | */ |
| 581 | rc = buf_len - offset - in_buf.buffer.length; |
| 582 | if (cmd_rc) |
| 583 | *cmd_rc = xlat_status(nvdimm, buf, cmd, |
| 584 | fw_status); |
| 585 | } else { |
| 586 | dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", |
| 587 | __func__, dimm_name, cmd_name, buf_len, |
| 588 | offset); |
| 589 | rc = -ENXIO; |
| 590 | } |
| 591 | } else { |
| 592 | rc = 0; |
| 593 | if (cmd_rc) |
| 594 | *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status); |
| 595 | } |
| 596 | |
| 597 | out: |
| 598 | ACPI_FREE(out_obj); |
| 599 | |
| 600 | return rc; |
| 601 | } |
| 602 | EXPORT_SYMBOL_GPL(acpi_nfit_ctl); |
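
/*
 * Example (a hedged sketch, not verbatim driver code): a bus-scope
 * caller, e.g. the ARS machinery, would issue a command through this
 * entry point along these lines:
 *
 *	struct nd_cmd_ars_cap cmd = {
 *		.address = spa->address,
 *		.length = spa->length,
 *	};
 *	int cmd_rc, rc;
 *
 *	rc = acpi_nfit_ctl(nd_desc, NULL, ND_CMD_ARS_CAP,
 *			&cmd, sizeof(cmd), &cmd_rc);
 *
 * A negative rc reports a marshalling/transport failure; cmd_rc carries
 * the xlat_status() translation of the firmware status word.
 */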
| 603 | |
| 604 | static const char *spa_type_name(u16 type) |
| 605 | { |
| 606 | static const char *to_name[] = { |
| 607 | [NFIT_SPA_VOLATILE] = "volatile", |
| 608 | [NFIT_SPA_PM] = "pmem", |
| 609 | [NFIT_SPA_DCR] = "dimm-control-region", |
| 610 | [NFIT_SPA_BDW] = "block-data-window", |
| 611 | [NFIT_SPA_VDISK] = "volatile-disk", |
| 612 | [NFIT_SPA_VCD] = "volatile-cd", |
| 613 | [NFIT_SPA_PDISK] = "persistent-disk", |
| 614 | [NFIT_SPA_PCD] = "persistent-cd", |
| 615 | |
| 616 | }; |
| 617 | |
| 618 | if (type > NFIT_SPA_PCD) |
| 619 | return "unknown"; |
| 620 | |
| 621 | return to_name[type]; |
| 622 | } |
| 623 | |
| 624 | int nfit_spa_type(struct acpi_nfit_system_address *spa) |
| 625 | { |
| 626 | int i; |
| 627 | |
| 628 | for (i = 0; i < NFIT_UUID_MAX; i++) |
| 629 | if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid)) |
| 630 | return i; |
| 631 | return -1; |
| 632 | } |
| 633 | |
| 634 | static bool add_spa(struct acpi_nfit_desc *acpi_desc, |
| 635 | struct nfit_table_prev *prev, |
| 636 | struct acpi_nfit_system_address *spa) |
| 637 | { |
| 638 | struct device *dev = acpi_desc->dev; |
| 639 | struct nfit_spa *nfit_spa; |
| 640 | |
| 641 | if (spa->header.length != sizeof(*spa)) |
| 642 | return false; |
| 643 | |
| 644 | list_for_each_entry(nfit_spa, &prev->spas, list) { |
| 645 | if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { |
| 646 | list_move_tail(&nfit_spa->list, &acpi_desc->spas); |
| 647 | return true; |
| 648 | } |
| 649 | } |
| 650 | |
| 651 | nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), |
| 652 | GFP_KERNEL); |
| 653 | if (!nfit_spa) |
| 654 | return false; |
| 655 | INIT_LIST_HEAD(&nfit_spa->list); |
| 656 | memcpy(nfit_spa->spa, spa, sizeof(*spa)); |
| 657 | list_add_tail(&nfit_spa->list, &acpi_desc->spas); |
| 658 | dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, |
| 659 | spa->range_index, |
| 660 | spa_type_name(nfit_spa_type(spa))); |
| 661 | return true; |
| 662 | } |
| 663 | |
| 664 | static bool add_memdev(struct acpi_nfit_desc *acpi_desc, |
| 665 | struct nfit_table_prev *prev, |
| 666 | struct acpi_nfit_memory_map *memdev) |
| 667 | { |
| 668 | struct device *dev = acpi_desc->dev; |
| 669 | struct nfit_memdev *nfit_memdev; |
| 670 | |
| 671 | if (memdev->header.length != sizeof(*memdev)) |
| 672 | return false; |
| 673 | |
| 674 | list_for_each_entry(nfit_memdev, &prev->memdevs, list) |
| 675 | if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { |
| 676 | list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); |
| 677 | return true; |
| 678 | } |
| 679 | |
| 680 | nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), |
| 681 | GFP_KERNEL); |
| 682 | if (!nfit_memdev) |
| 683 | return false; |
| 684 | INIT_LIST_HEAD(&nfit_memdev->list); |
| 685 | memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); |
| 686 | list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); |
| 687 | dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n", |
| 688 | __func__, memdev->device_handle, memdev->range_index, |
| 689 | memdev->region_index, memdev->flags); |
| 690 | return true; |
| 691 | } |
| 692 | |
| 693 | /* |
| 694 | * An implementation may provide a truncated control region if no block windows |
| 695 | * are defined. |
| 696 | */ |
| 697 | static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) |
| 698 | { |
| 699 | if (dcr->header.length < offsetof(struct acpi_nfit_control_region, |
| 700 | window_size)) |
| 701 | return 0; |
| 702 | if (dcr->windows) |
| 703 | return sizeof(*dcr); |
| 704 | return offsetof(struct acpi_nfit_control_region, window_size); |
| 705 | } |
| 706 | |
| 707 | static bool add_dcr(struct acpi_nfit_desc *acpi_desc, |
| 708 | struct nfit_table_prev *prev, |
| 709 | struct acpi_nfit_control_region *dcr) |
| 710 | { |
| 711 | struct device *dev = acpi_desc->dev; |
| 712 | struct nfit_dcr *nfit_dcr; |
| 713 | |
| 714 | if (!sizeof_dcr(dcr)) |
| 715 | return false; |
| 716 | |
| 717 | list_for_each_entry(nfit_dcr, &prev->dcrs, list) |
| 718 | if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { |
| 719 | list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); |
| 720 | return true; |
| 721 | } |
| 722 | |
| 723 | nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), |
| 724 | GFP_KERNEL); |
| 725 | if (!nfit_dcr) |
| 726 | return false; |
| 727 | INIT_LIST_HEAD(&nfit_dcr->list); |
| 728 | memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); |
| 729 | list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); |
| 730 | dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, |
| 731 | dcr->region_index, dcr->windows); |
| 732 | return true; |
| 733 | } |
| 734 | |
| 735 | static bool add_bdw(struct acpi_nfit_desc *acpi_desc, |
| 736 | struct nfit_table_prev *prev, |
| 737 | struct acpi_nfit_data_region *bdw) |
| 738 | { |
| 739 | struct device *dev = acpi_desc->dev; |
| 740 | struct nfit_bdw *nfit_bdw; |
| 741 | |
| 742 | if (bdw->header.length != sizeof(*bdw)) |
| 743 | return false; |
| 744 | list_for_each_entry(nfit_bdw, &prev->bdws, list) |
| 745 | if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { |
| 746 | list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); |
| 747 | return true; |
| 748 | } |
| 749 | |
| 750 | nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), |
| 751 | GFP_KERNEL); |
| 752 | if (!nfit_bdw) |
| 753 | return false; |
| 754 | INIT_LIST_HEAD(&nfit_bdw->list); |
| 755 | memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); |
| 756 | list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); |
| 757 | dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, |
| 758 | bdw->region_index, bdw->windows); |
| 759 | return true; |
| 760 | } |
| 761 | |
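/*
 * Interleave tables embed a variable-length line-offset array; the
 * struct declares a single element, so account for line_count - 1 more.
 */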
| 762 | static size_t sizeof_idt(struct acpi_nfit_interleave *idt) |
| 763 | { |
| 764 | if (idt->header.length < sizeof(*idt)) |
| 765 | return 0; |
| 766 | return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); |
| 767 | } |
| 768 | |
| 769 | static bool add_idt(struct acpi_nfit_desc *acpi_desc, |
| 770 | struct nfit_table_prev *prev, |
| 771 | struct acpi_nfit_interleave *idt) |
| 772 | { |
| 773 | struct device *dev = acpi_desc->dev; |
| 774 | struct nfit_idt *nfit_idt; |
| 775 | |
| 776 | if (!sizeof_idt(idt)) |
| 777 | return false; |
| 778 | |
| 779 | list_for_each_entry(nfit_idt, &prev->idts, list) { |
| 780 | if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) |
| 781 | continue; |
| 782 | |
| 783 | if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { |
| 784 | list_move_tail(&nfit_idt->list, &acpi_desc->idts); |
| 785 | return true; |
| 786 | } |
| 787 | } |
| 788 | |
| 789 | nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), |
| 790 | GFP_KERNEL); |
| 791 | if (!nfit_idt) |
| 792 | return false; |
| 793 | INIT_LIST_HEAD(&nfit_idt->list); |
| 794 | memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); |
| 795 | list_add_tail(&nfit_idt->list, &acpi_desc->idts); |
| 796 | dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, |
| 797 | idt->interleave_index, idt->line_count); |
| 798 | return true; |
| 799 | } |
| 800 | |
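/*
 * Flush-hint tables likewise embed a variable-length hint_address
 * array (one u64 per hint).
 */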
| 801 | static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) |
| 802 | { |
| 803 | if (flush->header.length < sizeof(*flush)) |
| 804 | return 0; |
| 805 | return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); |
| 806 | } |
| 807 | |
| 808 | static bool add_flush(struct acpi_nfit_desc *acpi_desc, |
| 809 | struct nfit_table_prev *prev, |
| 810 | struct acpi_nfit_flush_address *flush) |
| 811 | { |
| 812 | struct device *dev = acpi_desc->dev; |
| 813 | struct nfit_flush *nfit_flush; |
| 814 | |
| 815 | if (!sizeof_flush(flush)) |
| 816 | return false; |
| 817 | |
| 818 | list_for_each_entry(nfit_flush, &prev->flushes, list) { |
| 819 | if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) |
| 820 | continue; |
| 821 | |
| 822 | if (memcmp(nfit_flush->flush, flush, |
| 823 | sizeof_flush(flush)) == 0) { |
| 824 | list_move_tail(&nfit_flush->list, &acpi_desc->flushes); |
| 825 | return true; |
| 826 | } |
| 827 | } |
| 828 | |
| 829 | nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) |
| 830 | + sizeof_flush(flush), GFP_KERNEL); |
| 831 | if (!nfit_flush) |
| 832 | return false; |
| 833 | INIT_LIST_HEAD(&nfit_flush->list); |
| 834 | memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); |
| 835 | list_add_tail(&nfit_flush->list, &acpi_desc->flushes); |
| 836 | dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, |
| 837 | flush->device_handle, flush->hint_count); |
| 838 | return true; |
| 839 | } |
| 840 | |
| 841 | static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc, |
| 842 | struct acpi_nfit_capabilities *pcap) |
| 843 | { |
| 844 | struct device *dev = acpi_desc->dev; |
| 845 | u32 mask; |
| 846 | |
| 847 | mask = (1 << (pcap->highest_capability + 1)) - 1; |
| 848 | acpi_desc->platform_cap = pcap->capabilities & mask; |
| 849 | dev_dbg(dev, "%s: cap: %#x\n", __func__, acpi_desc->platform_cap); |
| 850 | return true; |
| 851 | } |
| 852 | |
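/*
 * Parse one NFIT sub-table. Returns a pointer to the next sub-table,
 * NULL at end-of-table (or on a zero-length header), or
 * ERR_PTR(-ENOMEM) when one of the add_* helpers fails to allocate.
 */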
| 853 | static void *add_table(struct acpi_nfit_desc *acpi_desc, |
| 854 | struct nfit_table_prev *prev, void *table, const void *end) |
| 855 | { |
| 856 | struct device *dev = acpi_desc->dev; |
| 857 | struct acpi_nfit_header *hdr; |
| 858 | void *err = ERR_PTR(-ENOMEM); |
| 859 | |
| 860 | if (table >= end) |
| 861 | return NULL; |
| 862 | |
| 863 | hdr = table; |
| 864 | if (!hdr->length) { |
| 865 | dev_warn(dev, "found a zero length table '%d' parsing nfit\n", |
| 866 | hdr->type); |
| 867 | return NULL; |
| 868 | } |
| 869 | |
| 870 | switch (hdr->type) { |
| 871 | case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: |
| 872 | if (!add_spa(acpi_desc, prev, table)) |
| 873 | return err; |
| 874 | break; |
| 875 | case ACPI_NFIT_TYPE_MEMORY_MAP: |
| 876 | if (!add_memdev(acpi_desc, prev, table)) |
| 877 | return err; |
| 878 | break; |
| 879 | case ACPI_NFIT_TYPE_CONTROL_REGION: |
| 880 | if (!add_dcr(acpi_desc, prev, table)) |
| 881 | return err; |
| 882 | break; |
| 883 | case ACPI_NFIT_TYPE_DATA_REGION: |
| 884 | if (!add_bdw(acpi_desc, prev, table)) |
| 885 | return err; |
| 886 | break; |
| 887 | case ACPI_NFIT_TYPE_INTERLEAVE: |
| 888 | if (!add_idt(acpi_desc, prev, table)) |
| 889 | return err; |
| 890 | break; |
| 891 | case ACPI_NFIT_TYPE_FLUSH_ADDRESS: |
| 892 | if (!add_flush(acpi_desc, prev, table)) |
| 893 | return err; |
| 894 | break; |
| 895 | case ACPI_NFIT_TYPE_SMBIOS: |
| 896 | dev_dbg(dev, "%s: smbios\n", __func__); |
| 897 | break; |
| 898 | case ACPI_NFIT_TYPE_CAPABILITIES: |
| 899 | if (!add_platform_cap(acpi_desc, table)) |
| 900 | return err; |
| 901 | break; |
| 902 | default: |
| 903 | dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); |
| 904 | break; |
| 905 | } |
| 906 | |
| 907 | return table + hdr->length; |
| 908 | } |
| 909 | |
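/*
 * Locate the SPA-BDW (block-data-window address range) backing this
 * DIMM's control region: a BDW-type SPA whose memdev entry matches the
 * DIMM's device handle and DCR index.
 */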
| 910 | static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, |
| 911 | struct nfit_mem *nfit_mem) |
| 912 | { |
| 913 | u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; |
| 914 | u16 dcr = nfit_mem->dcr->region_index; |
| 915 | struct nfit_spa *nfit_spa; |
| 916 | |
| 917 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
| 918 | u16 range_index = nfit_spa->spa->range_index; |
| 919 | int type = nfit_spa_type(nfit_spa->spa); |
| 920 | struct nfit_memdev *nfit_memdev; |
| 921 | |
| 922 | if (type != NFIT_SPA_BDW) |
| 923 | continue; |
| 924 | |
| 925 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
| 926 | if (nfit_memdev->memdev->range_index != range_index) |
| 927 | continue; |
| 928 | if (nfit_memdev->memdev->device_handle != device_handle) |
| 929 | continue; |
| 930 | if (nfit_memdev->memdev->region_index != dcr) |
| 931 | continue; |
| 932 | |
| 933 | nfit_mem->spa_bdw = nfit_spa->spa; |
| 934 | return; |
| 935 | } |
| 936 | } |
| 937 | |
| 938 | dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", |
| 939 | nfit_mem->spa_dcr->range_index); |
| 940 | nfit_mem->bdw = NULL; |
| 941 | } |
| 942 | |
| 943 | static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, |
| 944 | struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) |
| 945 | { |
| 946 | u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; |
| 947 | struct nfit_memdev *nfit_memdev; |
| 948 | struct nfit_bdw *nfit_bdw; |
| 949 | struct nfit_idt *nfit_idt; |
| 950 | u16 idt_idx, range_index; |
| 951 | |
| 952 | list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { |
| 953 | if (nfit_bdw->bdw->region_index != dcr) |
| 954 | continue; |
| 955 | nfit_mem->bdw = nfit_bdw->bdw; |
| 956 | break; |
| 957 | } |
| 958 | |
| 959 | if (!nfit_mem->bdw) |
| 960 | return; |
| 961 | |
| 962 | nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); |
| 963 | |
| 964 | if (!nfit_mem->spa_bdw) |
| 965 | return; |
| 966 | |
| 967 | range_index = nfit_mem->spa_bdw->range_index; |
| 968 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
| 969 | if (nfit_memdev->memdev->range_index != range_index || |
| 970 | nfit_memdev->memdev->region_index != dcr) |
| 971 | continue; |
| 972 | nfit_mem->memdev_bdw = nfit_memdev->memdev; |
| 973 | idt_idx = nfit_memdev->memdev->interleave_index; |
| 974 | list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { |
| 975 | if (nfit_idt->idt->interleave_index != idt_idx) |
| 976 | continue; |
| 977 | nfit_mem->idt_bdw = nfit_idt->idt; |
| 978 | break; |
| 979 | } |
| 980 | break; |
| 981 | } |
| 982 | } |
| 983 | |
| 984 | static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, |
| 985 | struct acpi_nfit_system_address *spa) |
| 986 | { |
| 987 | struct nfit_mem *nfit_mem, *found; |
| 988 | struct nfit_memdev *nfit_memdev; |
| 989 | int type = spa ? nfit_spa_type(spa) : 0; |
| 990 | |
| 991 | switch (type) { |
| 992 | case NFIT_SPA_DCR: |
| 993 | case NFIT_SPA_PM: |
| 994 | break; |
| 995 | default: |
| 996 | if (spa) |
| 997 | return 0; |
| 998 | } |
| 999 | |
| 1000 | /* |
| 1001 | * This loop runs in two modes, when a dimm is mapped the loop |
| 1002 | * adds memdev associations to an existing dimm, or creates a |
| 1003 | * dimm. In the unmapped dimm case this loop sweeps for memdev |
| 1004 | * instances with an invalid / zero range_index and adds those |
| 1005 | * dimms without spa associations. |
| 1006 | */ |
| 1007 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
| 1008 | struct nfit_flush *nfit_flush; |
| 1009 | struct nfit_dcr *nfit_dcr; |
| 1010 | u32 device_handle; |
| 1011 | u16 dcr; |
| 1012 | |
| 1013 | if (spa && nfit_memdev->memdev->range_index != spa->range_index) |
| 1014 | continue; |
| 1015 | if (!spa && nfit_memdev->memdev->range_index) |
| 1016 | continue; |
| 1017 | found = NULL; |
| 1018 | dcr = nfit_memdev->memdev->region_index; |
| 1019 | device_handle = nfit_memdev->memdev->device_handle; |
| 1020 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) |
| 1021 | if (__to_nfit_memdev(nfit_mem)->device_handle |
| 1022 | == device_handle) { |
| 1023 | found = nfit_mem; |
| 1024 | break; |
| 1025 | } |
| 1026 | |
| 1027 | if (found) |
| 1028 | nfit_mem = found; |
| 1029 | else { |
| 1030 | nfit_mem = devm_kzalloc(acpi_desc->dev, |
| 1031 | sizeof(*nfit_mem), GFP_KERNEL); |
| 1032 | if (!nfit_mem) |
| 1033 | return -ENOMEM; |
| 1034 | INIT_LIST_HEAD(&nfit_mem->list); |
| 1035 | nfit_mem->acpi_desc = acpi_desc; |
| 1036 | list_add(&nfit_mem->list, &acpi_desc->dimms); |
| 1037 | } |
| 1038 | |
| 1039 | list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { |
| 1040 | if (nfit_dcr->dcr->region_index != dcr) |
| 1041 | continue; |
| 1042 | /* |
| 1043 | * Record the control region for the dimm. For |
| 1044 | * the ACPI 6.1 case, where there are separate |
| 1045 | * control regions for the pmem vs blk |
| 1046 | * interfaces, be sure to record the extended |
| 1047 | * blk details. |
| 1048 | */ |
| 1049 | if (!nfit_mem->dcr) |
| 1050 | nfit_mem->dcr = nfit_dcr->dcr; |
| 1051 | else if (nfit_mem->dcr->windows == 0 |
| 1052 | && nfit_dcr->dcr->windows) |
| 1053 | nfit_mem->dcr = nfit_dcr->dcr; |
| 1054 | break; |
| 1055 | } |
| 1056 | |
| 1057 | list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { |
| 1058 | struct acpi_nfit_flush_address *flush; |
| 1059 | u16 i; |
| 1060 | |
| 1061 | if (nfit_flush->flush->device_handle != device_handle) |
| 1062 | continue; |
| 1063 | nfit_mem->nfit_flush = nfit_flush; |
| 1064 | flush = nfit_flush->flush; |
| 1065 | nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, |
| 1066 | flush->hint_count |
| 1067 | * sizeof(struct resource), GFP_KERNEL); |
| 1068 | if (!nfit_mem->flush_wpq) |
| 1069 | return -ENOMEM; |
| 1070 | for (i = 0; i < flush->hint_count; i++) { |
| 1071 | struct resource *res = &nfit_mem->flush_wpq[i]; |
| 1072 | |
| 1073 | res->start = flush->hint_address[i]; |
| 1074 | res->end = res->start + 8 - 1; |
| 1075 | } |
| 1076 | break; |
| 1077 | } |
| 1078 | |
| 1079 | if (dcr && !nfit_mem->dcr) { |
| 1080 | dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", |
					spa ? spa->range_index : 0, dcr);
| 1082 | return -ENODEV; |
| 1083 | } |
| 1084 | |
| 1085 | if (type == NFIT_SPA_DCR) { |
| 1086 | struct nfit_idt *nfit_idt; |
| 1087 | u16 idt_idx; |
| 1088 | |
| 1089 | /* multiple dimms may share a SPA when interleaved */ |
| 1090 | nfit_mem->spa_dcr = spa; |
| 1091 | nfit_mem->memdev_dcr = nfit_memdev->memdev; |
| 1092 | idt_idx = nfit_memdev->memdev->interleave_index; |
| 1093 | list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { |
| 1094 | if (nfit_idt->idt->interleave_index != idt_idx) |
| 1095 | continue; |
| 1096 | nfit_mem->idt_dcr = nfit_idt->idt; |
| 1097 | break; |
| 1098 | } |
| 1099 | nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); |
| 1100 | } else if (type == NFIT_SPA_PM) { |
| 1101 | /* |
| 1102 | * A single dimm may belong to multiple SPA-PM |
| 1103 | * ranges, record at least one in addition to |
| 1104 | * any SPA-DCR range. |
| 1105 | */ |
| 1106 | nfit_mem->memdev_pmem = nfit_memdev->memdev; |
| 1107 | } else |
| 1108 | nfit_mem->memdev_dcr = nfit_memdev->memdev; |
| 1109 | } |
| 1110 | |
| 1111 | return 0; |
| 1112 | } |
| 1113 | |
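/* list_sort() comparator: order DIMMs by ascending NFIT device handle */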
| 1114 | static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) |
| 1115 | { |
| 1116 | struct nfit_mem *a = container_of(_a, typeof(*a), list); |
| 1117 | struct nfit_mem *b = container_of(_b, typeof(*b), list); |
| 1118 | u32 handleA, handleB; |
| 1119 | |
| 1120 | handleA = __to_nfit_memdev(a)->device_handle; |
| 1121 | handleB = __to_nfit_memdev(b)->device_handle; |
| 1122 | if (handleA < handleB) |
| 1123 | return -1; |
| 1124 | else if (handleA > handleB) |
| 1125 | return 1; |
| 1126 | return 0; |
| 1127 | } |
| 1128 | |
| 1129 | static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) |
| 1130 | { |
| 1131 | struct nfit_spa *nfit_spa; |
	int rc;

| 1135 | /* |
| 1136 | * For each SPA-DCR or SPA-PMEM address range find its |
| 1137 | * corresponding MEMDEV(s). From each MEMDEV find the |
| 1138 | * corresponding DCR. Then, if we're operating on a SPA-DCR, |
| 1139 | * try to find a SPA-BDW and a corresponding BDW that references |
| 1140 | * the DCR. Throw it all into an nfit_mem object. Note, that |
| 1141 | * BDWs are optional. |
| 1142 | */ |
| 1143 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
| 1144 | rc = __nfit_mem_init(acpi_desc, nfit_spa->spa); |
| 1145 | if (rc) |
| 1146 | return rc; |
| 1147 | } |
| 1148 | |
| 1149 | /* |
| 1150 | * If a DIMM has failed to be mapped into SPA there will be no |
| 1151 | * SPA entries above. Find and register all the unmapped DIMMs |
| 1152 | * for reporting and recovery purposes. |
| 1153 | */ |
| 1154 | rc = __nfit_mem_init(acpi_desc, NULL); |
| 1155 | if (rc) |
| 1156 | return rc; |
| 1157 | |
| 1158 | list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); |
| 1159 | |
| 1160 | return 0; |
| 1161 | } |
| 1162 | |
| 1163 | static ssize_t bus_dsm_mask_show(struct device *dev, |
| 1164 | struct device_attribute *attr, char *buf) |
| 1165 | { |
| 1166 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
| 1167 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
| 1168 | |
| 1169 | return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); |
| 1170 | } |
| 1171 | static struct device_attribute dev_attr_bus_dsm_mask = |
| 1172 | __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); |
| 1173 | |
| 1174 | static ssize_t revision_show(struct device *dev, |
| 1175 | struct device_attribute *attr, char *buf) |
| 1176 | { |
| 1177 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
| 1178 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
| 1179 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 1180 | |
| 1181 | return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); |
| 1182 | } |
| 1183 | static DEVICE_ATTR_RO(revision); |
| 1184 | |
| 1185 | static ssize_t hw_error_scrub_show(struct device *dev, |
| 1186 | struct device_attribute *attr, char *buf) |
| 1187 | { |
| 1188 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
| 1189 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
| 1190 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 1191 | |
| 1192 | return sprintf(buf, "%d\n", acpi_desc->scrub_mode); |
| 1193 | } |
| 1194 | |
| 1195 | /* |
| 1196 | * The 'hw_error_scrub' attribute can have the following values written to it: |
| 1197 | * '0': Switch to the default mode where an exception will only insert |
| 1198 | * the address of the memory error into the poison and badblocks lists. |
| 1199 | * '1': Enable a full scrub to happen if an exception for a memory error is |
| 1200 | * received. |
| 1201 | */ |
| 1202 | static ssize_t hw_error_scrub_store(struct device *dev, |
| 1203 | struct device_attribute *attr, const char *buf, size_t size) |
| 1204 | { |
| 1205 | struct nvdimm_bus_descriptor *nd_desc; |
| 1206 | ssize_t rc; |
| 1207 | long val; |
| 1208 | |
| 1209 | rc = kstrtol(buf, 0, &val); |
| 1210 | if (rc) |
| 1211 | return rc; |
| 1212 | |
| 1213 | device_lock(dev); |
| 1214 | nd_desc = dev_get_drvdata(dev); |
| 1215 | if (nd_desc) { |
| 1216 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 1217 | |
| 1218 | switch (val) { |
| 1219 | case HW_ERROR_SCRUB_ON: |
| 1220 | acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; |
| 1221 | break; |
| 1222 | case HW_ERROR_SCRUB_OFF: |
| 1223 | acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; |
| 1224 | break; |
| 1225 | default: |
| 1226 | rc = -EINVAL; |
| 1227 | break; |
| 1228 | } |
| 1229 | } |
| 1230 | device_unlock(dev); |
| 1231 | if (rc) |
| 1232 | return rc; |
| 1233 | return size; |
| 1234 | } |
| 1235 | static DEVICE_ATTR_RW(hw_error_scrub); |
| 1236 | |
| 1237 | /* |
| 1238 | * This shows the number of full Address Range Scrubs that have been |
| 1239 | * completed since driver load time. Userspace can wait on this using |
 * select/poll etc. A '+' at the end indicates an ARS is in progress.
| 1241 | */ |
| 1242 | static ssize_t scrub_show(struct device *dev, |
| 1243 | struct device_attribute *attr, char *buf) |
| 1244 | { |
| 1245 | struct nvdimm_bus_descriptor *nd_desc; |
| 1246 | ssize_t rc = -ENXIO; |
| 1247 | |
| 1248 | device_lock(dev); |
| 1249 | nd_desc = dev_get_drvdata(dev); |
| 1250 | if (nd_desc) { |
| 1251 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 1252 | |
| 1253 | rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, |
| 1254 | (work_busy(&acpi_desc->work)) ? "+\n" : "\n"); |
| 1255 | } |
| 1256 | device_unlock(dev); |
| 1257 | return rc; |
| 1258 | } |
| 1259 | |
| 1260 | static ssize_t scrub_store(struct device *dev, |
| 1261 | struct device_attribute *attr, const char *buf, size_t size) |
| 1262 | { |
| 1263 | struct nvdimm_bus_descriptor *nd_desc; |
| 1264 | ssize_t rc; |
| 1265 | long val; |
| 1266 | |
| 1267 | rc = kstrtol(buf, 0, &val); |
| 1268 | if (rc) |
| 1269 | return rc; |
| 1270 | if (val != 1) |
| 1271 | return -EINVAL; |
| 1272 | |
| 1273 | device_lock(dev); |
| 1274 | nd_desc = dev_get_drvdata(dev); |
| 1275 | if (nd_desc) { |
| 1276 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 1277 | |
| 1278 | rc = acpi_nfit_ars_rescan(acpi_desc, 0); |
| 1279 | } |
| 1280 | device_unlock(dev); |
| 1281 | if (rc) |
| 1282 | return rc; |
| 1283 | return size; |
| 1284 | } |
| 1285 | static DEVICE_ATTR_RW(scrub); |
| 1286 | |
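/* ARS needs the cap, start, and status commands to all be supported */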
| 1287 | static bool ars_supported(struct nvdimm_bus *nvdimm_bus) |
| 1288 | { |
| 1289 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
| 1290 | const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START |
| 1291 | | 1 << ND_CMD_ARS_STATUS; |
| 1292 | |
| 1293 | return (nd_desc->cmd_mask & mask) == mask; |
| 1294 | } |
| 1295 | |
| 1296 | static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) |
| 1297 | { |
| 1298 | struct device *dev = container_of(kobj, struct device, kobj); |
| 1299 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
| 1300 | |
| 1301 | if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) |
| 1302 | return 0; |
| 1303 | return a->mode; |
| 1304 | } |
| 1305 | |
| 1306 | static struct attribute *acpi_nfit_attributes[] = { |
| 1307 | &dev_attr_revision.attr, |
| 1308 | &dev_attr_scrub.attr, |
| 1309 | &dev_attr_hw_error_scrub.attr, |
| 1310 | &dev_attr_bus_dsm_mask.attr, |
| 1311 | NULL, |
| 1312 | }; |
| 1313 | |
| 1314 | static const struct attribute_group acpi_nfit_attribute_group = { |
| 1315 | .name = "nfit", |
| 1316 | .attrs = acpi_nfit_attributes, |
| 1317 | .is_visible = nfit_visible, |
| 1318 | }; |
| 1319 | |
| 1320 | static const struct attribute_group *acpi_nfit_attribute_groups[] = { |
| 1321 | &nvdimm_bus_attribute_group, |
| 1322 | &acpi_nfit_attribute_group, |
| 1323 | NULL, |
| 1324 | }; |
| 1325 | |
| 1326 | static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) |
| 1327 | { |
| 1328 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 1329 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 1330 | |
| 1331 | return __to_nfit_memdev(nfit_mem); |
| 1332 | } |
| 1333 | |
| 1334 | static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) |
| 1335 | { |
| 1336 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 1337 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 1338 | |
| 1339 | return nfit_mem->dcr; |
| 1340 | } |
| 1341 | |
| 1342 | static ssize_t handle_show(struct device *dev, |
| 1343 | struct device_attribute *attr, char *buf) |
| 1344 | { |
| 1345 | struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); |
| 1346 | |
| 1347 | return sprintf(buf, "%#x\n", memdev->device_handle); |
| 1348 | } |
| 1349 | static DEVICE_ATTR_RO(handle); |
| 1350 | |
| 1351 | static ssize_t phys_id_show(struct device *dev, |
| 1352 | struct device_attribute *attr, char *buf) |
| 1353 | { |
| 1354 | struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); |
| 1355 | |
| 1356 | return sprintf(buf, "%#x\n", memdev->physical_id); |
| 1357 | } |
| 1358 | static DEVICE_ATTR_RO(phys_id); |
| 1359 | |
| 1360 | static ssize_t vendor_show(struct device *dev, |
| 1361 | struct device_attribute *attr, char *buf) |
| 1362 | { |
| 1363 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1364 | |
| 1365 | return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); |
| 1366 | } |
| 1367 | static DEVICE_ATTR_RO(vendor); |
| 1368 | |
| 1369 | static ssize_t rev_id_show(struct device *dev, |
| 1370 | struct device_attribute *attr, char *buf) |
| 1371 | { |
| 1372 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1373 | |
| 1374 | return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); |
| 1375 | } |
| 1376 | static DEVICE_ATTR_RO(rev_id); |
| 1377 | |
| 1378 | static ssize_t device_show(struct device *dev, |
| 1379 | struct device_attribute *attr, char *buf) |
| 1380 | { |
| 1381 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1382 | |
| 1383 | return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); |
| 1384 | } |
| 1385 | static DEVICE_ATTR_RO(device); |
| 1386 | |
| 1387 | static ssize_t subsystem_vendor_show(struct device *dev, |
| 1388 | struct device_attribute *attr, char *buf) |
| 1389 | { |
| 1390 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1391 | |
| 1392 | return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); |
| 1393 | } |
| 1394 | static DEVICE_ATTR_RO(subsystem_vendor); |
| 1395 | |
| 1396 | static ssize_t subsystem_rev_id_show(struct device *dev, |
| 1397 | struct device_attribute *attr, char *buf) |
| 1398 | { |
| 1399 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1400 | |
| 1401 | return sprintf(buf, "0x%04x\n", |
| 1402 | be16_to_cpu(dcr->subsystem_revision_id)); |
| 1403 | } |
| 1404 | static DEVICE_ATTR_RO(subsystem_rev_id); |
| 1405 | |
| 1406 | static ssize_t subsystem_device_show(struct device *dev, |
| 1407 | struct device_attribute *attr, char *buf) |
| 1408 | { |
| 1409 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1410 | |
| 1411 | return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); |
| 1412 | } |
| 1413 | static DEVICE_ATTR_RO(subsystem_device); |
| 1414 | |
| 1415 | static int num_nvdimm_formats(struct nvdimm *nvdimm) |
| 1416 | { |
| 1417 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 1418 | int formats = 0; |
| 1419 | |
| 1420 | if (nfit_mem->memdev_pmem) |
| 1421 | formats++; |
| 1422 | if (nfit_mem->memdev_bdw) |
| 1423 | formats++; |
| 1424 | return formats; |
| 1425 | } |
| 1426 | |
| 1427 | static ssize_t format_show(struct device *dev, |
| 1428 | struct device_attribute *attr, char *buf) |
| 1429 | { |
| 1430 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1431 | |
| 1432 | return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); |
| 1433 | } |
| 1434 | static DEVICE_ATTR_RO(format); |
| 1435 | |
| 1436 | static ssize_t format1_show(struct device *dev, |
| 1437 | struct device_attribute *attr, char *buf) |
| 1438 | { |
| 1439 | u32 handle; |
| 1440 | ssize_t rc = -ENXIO; |
| 1441 | struct nfit_mem *nfit_mem; |
| 1442 | struct nfit_memdev *nfit_memdev; |
| 1443 | struct acpi_nfit_desc *acpi_desc; |
| 1444 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 1445 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1446 | |
| 1447 | nfit_mem = nvdimm_provider_data(nvdimm); |
| 1448 | acpi_desc = nfit_mem->acpi_desc; |
| 1449 | handle = to_nfit_memdev(dev)->device_handle; |
| 1450 | |
| 1451 | /* assumes DIMMs have at most 2 published interface codes */ |
| 1452 | mutex_lock(&acpi_desc->init_mutex); |
| 1453 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
| 1454 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; |
| 1455 | struct nfit_dcr *nfit_dcr; |
| 1456 | |
| 1457 | if (memdev->device_handle != handle) |
| 1458 | continue; |
| 1459 | |
| 1460 | list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { |
| 1461 | if (nfit_dcr->dcr->region_index != memdev->region_index) |
| 1462 | continue; |
| 1463 | if (nfit_dcr->dcr->code == dcr->code) |
| 1464 | continue; |
| 1465 | rc = sprintf(buf, "0x%04x\n", |
| 1466 | le16_to_cpu(nfit_dcr->dcr->code)); |
| 1467 | break; |
| 1468 | } |
		if (rc != -ENXIO)
| 1470 | break; |
| 1471 | } |
| 1472 | mutex_unlock(&acpi_desc->init_mutex); |
| 1473 | return rc; |
| 1474 | } |
| 1475 | static DEVICE_ATTR_RO(format1); |
| 1476 | |
| 1477 | static ssize_t formats_show(struct device *dev, |
| 1478 | struct device_attribute *attr, char *buf) |
| 1479 | { |
| 1480 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 1481 | |
| 1482 | return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); |
| 1483 | } |
| 1484 | static DEVICE_ATTR_RO(formats); |
| 1485 | |
| 1486 | static ssize_t serial_show(struct device *dev, |
| 1487 | struct device_attribute *attr, char *buf) |
| 1488 | { |
| 1489 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1490 | |
| 1491 | return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); |
| 1492 | } |
| 1493 | static DEVICE_ATTR_RO(serial); |
| 1494 | |
| 1495 | static ssize_t family_show(struct device *dev, |
| 1496 | struct device_attribute *attr, char *buf) |
| 1497 | { |
| 1498 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 1499 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 1500 | |
| 1501 | if (nfit_mem->family < 0) |
| 1502 | return -ENXIO; |
| 1503 | return sprintf(buf, "%d\n", nfit_mem->family); |
| 1504 | } |
| 1505 | static DEVICE_ATTR_RO(family); |
| 1506 | |
| 1507 | static ssize_t dsm_mask_show(struct device *dev, |
| 1508 | struct device_attribute *attr, char *buf) |
| 1509 | { |
| 1510 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 1511 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 1512 | |
| 1513 | if (nfit_mem->family < 0) |
| 1514 | return -ENXIO; |
| 1515 | return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); |
| 1516 | } |
| 1517 | static DEVICE_ATTR_RO(dsm_mask); |
| 1518 | |
| 1519 | static ssize_t flags_show(struct device *dev, |
| 1520 | struct device_attribute *attr, char *buf) |
| 1521 | { |
| 1522 | u16 flags = to_nfit_memdev(dev)->flags; |
| 1523 | |
| 1524 | return sprintf(buf, "%s%s%s%s%s%s%s\n", |
| 1525 | flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", |
| 1526 | flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", |
| 1527 | flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", |
| 1528 | flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", |
| 1529 | flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "", |
| 1530 | flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "", |
| 1531 | flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : ""); |
| 1532 | } |
| 1533 | static DEVICE_ATTR_RO(flags); |
| 1534 | |
| 1535 | static ssize_t id_show(struct device *dev, |
| 1536 | struct device_attribute *attr, char *buf) |
| 1537 | { |
| 1538 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
| 1539 | |
| 1540 | if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) |
| 1541 | return sprintf(buf, "%04x-%02x-%04x-%08x\n", |
| 1542 | be16_to_cpu(dcr->vendor_id), |
| 1543 | dcr->manufacturing_location, |
| 1544 | be16_to_cpu(dcr->manufacturing_date), |
| 1545 | be32_to_cpu(dcr->serial_number)); |
| 1546 | else |
| 1547 | return sprintf(buf, "%04x-%08x\n", |
| 1548 | be16_to_cpu(dcr->vendor_id), |
| 1549 | be32_to_cpu(dcr->serial_number)); |
| 1550 | } |
| 1551 | static DEVICE_ATTR_RO(id); |
| 1552 | |
| 1553 | static struct attribute *acpi_nfit_dimm_attributes[] = { |
| 1554 | &dev_attr_handle.attr, |
| 1555 | &dev_attr_phys_id.attr, |
| 1556 | &dev_attr_vendor.attr, |
| 1557 | &dev_attr_device.attr, |
| 1558 | &dev_attr_rev_id.attr, |
| 1559 | &dev_attr_subsystem_vendor.attr, |
| 1560 | &dev_attr_subsystem_device.attr, |
| 1561 | &dev_attr_subsystem_rev_id.attr, |
| 1562 | &dev_attr_format.attr, |
| 1563 | &dev_attr_formats.attr, |
| 1564 | &dev_attr_format1.attr, |
| 1565 | &dev_attr_serial.attr, |
| 1566 | &dev_attr_flags.attr, |
| 1567 | &dev_attr_id.attr, |
| 1568 | &dev_attr_family.attr, |
| 1569 | &dev_attr_dsm_mask.attr, |
| 1570 | NULL, |
| 1571 | }; |
| 1572 | |
| 1573 | static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, |
| 1574 | struct attribute *a, int n) |
| 1575 | { |
| 1576 | struct device *dev = container_of(kobj, struct device, kobj); |
| 1577 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 1578 | |
| 1579 | if (!to_nfit_dcr(dev)) { |
| 1580 | /* Without a dcr only the memdev attributes can be surfaced */ |
| 1581 | if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr |
| 1582 | || a == &dev_attr_flags.attr |
| 1583 | || a == &dev_attr_family.attr |
| 1584 | || a == &dev_attr_dsm_mask.attr) |
| 1585 | return a->mode; |
| 1586 | return 0; |
| 1587 | } |
| 1588 | |
| 1589 | if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) |
| 1590 | return 0; |
| 1591 | return a->mode; |
| 1592 | } |
| 1593 | |
| 1594 | static const struct attribute_group acpi_nfit_dimm_attribute_group = { |
| 1595 | .name = "nfit", |
| 1596 | .attrs = acpi_nfit_dimm_attributes, |
| 1597 | .is_visible = acpi_nfit_dimm_attr_visible, |
| 1598 | }; |
| 1599 | |
| 1600 | static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { |
| 1601 | &nvdimm_attribute_group, |
| 1602 | &nd_device_attribute_group, |
| 1603 | &acpi_nfit_dimm_attribute_group, |
| 1604 | NULL, |
| 1605 | }; |
| 1606 | |
| 1607 | static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, |
| 1608 | u32 device_handle) |
| 1609 | { |
| 1610 | struct nfit_mem *nfit_mem; |
| 1611 | |
| 1612 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) |
| 1613 | if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) |
| 1614 | return nfit_mem->nvdimm; |
| 1615 | |
| 1616 | return NULL; |
| 1617 | } |
| 1618 | |
| 1619 | void __acpi_nvdimm_notify(struct device *dev, u32 event) |
| 1620 | { |
| 1621 | struct nfit_mem *nfit_mem; |
| 1622 | struct acpi_nfit_desc *acpi_desc; |
| 1623 | |
| 1624 | dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__, |
| 1625 | event); |
| 1626 | |
| 1627 | if (event != NFIT_NOTIFY_DIMM_HEALTH) { |
| 1628 | dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), |
| 1629 | event); |
| 1630 | return; |
| 1631 | } |
| 1632 | |
| 1633 | acpi_desc = dev_get_drvdata(dev->parent); |
| 1634 | if (!acpi_desc) |
| 1635 | return; |
| 1636 | |
| 1637 | /* |
| 1638 | * If we successfully retrieved acpi_desc, then we know nfit_mem data |
| 1639 | * is still valid. |
| 1640 | */ |
| 1641 | nfit_mem = dev_get_drvdata(dev); |
| 1642 | if (nfit_mem && nfit_mem->flags_attr) |
| 1643 | sysfs_notify_dirent(nfit_mem->flags_attr); |
| 1644 | } |
| 1645 | EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); |
| 1646 | |
| 1647 | static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) |
| 1648 | { |
| 1649 | struct acpi_device *adev = data; |
| 1650 | struct device *dev = &adev->dev; |
| 1651 | |
| 1652 | device_lock(dev->parent); |
| 1653 | __acpi_nvdimm_notify(dev, event); |
| 1654 | device_unlock(dev->parent); |
| 1655 | } |
| 1656 | |
| 1657 | static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, |
| 1658 | struct nfit_mem *nfit_mem, u32 device_handle) |
| 1659 | { |
| 1660 | struct acpi_device *adev, *adev_dimm; |
| 1661 | struct device *dev = acpi_desc->dev; |
| 1662 | union acpi_object *obj; |
| 1663 | unsigned long dsm_mask; |
| 1664 | const guid_t *guid; |
| 1665 | int i; |
| 1666 | int family = -1; |
| 1667 | |
| 1668 | /* nfit test assumes 1:1 relationship between commands and dsms */ |
| 1669 | nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; |
| 1670 | nfit_mem->family = NVDIMM_FAMILY_INTEL; |
| 1671 | adev = to_acpi_dev(acpi_desc); |
| 1672 | if (!adev) |
| 1673 | return 0; |
| 1674 | |
| 1675 | adev_dimm = acpi_find_child_device(adev, device_handle, false); |
| 1676 | nfit_mem->adev = adev_dimm; |
| 1677 | if (!adev_dimm) { |
| 1678 | dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", |
| 1679 | device_handle); |
| 1680 | return force_enable_dimms ? 0 : -ENODEV; |
| 1681 | } |
| 1682 | |
| 1683 | if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, |
| 1684 | ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { |
| 1685 | dev_err(dev, "%s: notification registration failed\n", |
| 1686 | dev_name(&adev_dimm->dev)); |
| 1687 | return -ENXIO; |
| 1688 | } |
| 1689 | /* |
| 1690 | * Record nfit_mem for the notification path to track back to |
| 1691 | * the nfit sysfs attributes for this dimm device object. |
| 1692 | */ |
| 1693 | dev_set_drvdata(&adev_dimm->dev, nfit_mem); |
| 1694 | |
| 1695 | /* |
| 1696 | * Until standardization materializes we need to consider 4 |
	 * different command sets. Note that checking for function 0
	 * (bit 0) tells us if any commands are reachable through this GUID.
| 1699 | */ |
| 1700 | for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) |
| 1701 | if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) |
| 1702 | if (family < 0 || i == default_dsm_family) |
| 1703 | family = i; |
| 1704 | |
| 1705 | /* limit the supported commands to those that are publicly documented */ |
| 1706 | nfit_mem->family = family; |
| 1707 | if (override_dsm_mask && !disable_vendor_specific) |
| 1708 | dsm_mask = override_dsm_mask; |
| 1709 | else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { |
| 1710 | dsm_mask = NVDIMM_INTEL_CMDMASK; |
| 1711 | if (disable_vendor_specific) |
| 1712 | dsm_mask &= ~(1 << ND_CMD_VENDOR); |
| 1713 | } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { |
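		/* HPE1: functions 1-2, 4-6, 10-13 and 18-20 */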
| 1714 | dsm_mask = 0x1c3c76; |
| 1715 | } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { |
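		/* HPE2: functions 1-8; function 8 is the vendor passthrough */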
| 1716 | dsm_mask = 0x1fe; |
| 1717 | if (disable_vendor_specific) |
| 1718 | dsm_mask &= ~(1 << 8); |
| 1719 | } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { |
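		/* MSFT: no public mask, so probe all possible functions */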
| 1720 | dsm_mask = 0xffffffff; |
| 1721 | } else { |
| 1722 | dev_dbg(dev, "unknown dimm command family\n"); |
| 1723 | nfit_mem->family = -1; |
| 1724 | /* DSMs are optional, continue loading the driver... */ |
| 1725 | return 0; |
| 1726 | } |
| 1727 | |
| 1728 | guid = to_nfit_uuid(nfit_mem->family); |
| 1729 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) |
| 1730 | if (acpi_check_dsm(adev_dimm->handle, guid, |
| 1731 | nfit_dsm_revid(nfit_mem->family, i), |
| 1732 | 1ULL << i)) |
| 1733 | set_bit(i, &nfit_mem->dsm_mask); |
| 1734 | |
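	/*
	 * Probe the ACPI label methods (_LSI, _LSR, _LSW) with trial
	 * calls. Each capability recorded here lets
	 * acpi_nfit_register_dimms() enable the corresponding
	 * ND_CMD_{GET,SET}_CONFIG_* command.
	 */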
| 1735 | obj = acpi_label_info(adev_dimm->handle); |
| 1736 | if (obj) { |
| 1737 | ACPI_FREE(obj); |
| 1738 | nfit_mem->has_lsi = 1; |
| 1739 | dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev)); |
| 1740 | } |
| 1741 | |
| 1742 | obj = acpi_label_read(adev_dimm->handle, 0, 0); |
| 1743 | if (obj) { |
| 1744 | ACPI_FREE(obj); |
| 1745 | nfit_mem->has_lsr = 1; |
| 1746 | dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); |
| 1747 | } |
| 1748 | |
| 1749 | obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL); |
| 1750 | if (obj) { |
| 1751 | ACPI_FREE(obj); |
| 1752 | nfit_mem->has_lsw = 1; |
| 1753 | dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); |
| 1754 | } |
| 1755 | |
| 1756 | return 0; |
| 1757 | } |
| 1758 | |
| 1759 | static void shutdown_dimm_notify(void *data) |
| 1760 | { |
| 1761 | struct acpi_nfit_desc *acpi_desc = data; |
| 1762 | struct nfit_mem *nfit_mem; |
| 1763 | |
| 1764 | mutex_lock(&acpi_desc->init_mutex); |
| 1765 | /* |
| 1766 | * Clear out the nfit_mem->flags_attr and shut down dimm event |
| 1767 | * notifications. |
| 1768 | */ |
| 1769 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
| 1770 | struct acpi_device *adev_dimm = nfit_mem->adev; |
| 1771 | |
| 1772 | if (nfit_mem->flags_attr) { |
| 1773 | sysfs_put(nfit_mem->flags_attr); |
| 1774 | nfit_mem->flags_attr = NULL; |
| 1775 | } |
| 1776 | if (adev_dimm) { |
| 1777 | acpi_remove_notify_handler(adev_dimm->handle, |
| 1778 | ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); |
| 1779 | dev_set_drvdata(&adev_dimm->dev, NULL); |
| 1780 | } |
| 1781 | } |
| 1782 | mutex_unlock(&acpi_desc->init_mutex); |
| 1783 | } |
| 1784 | |
| 1785 | static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) |
| 1786 | { |
| 1787 | struct nfit_mem *nfit_mem; |
| 1788 | int dimm_count = 0, rc; |
| 1789 | struct nvdimm *nvdimm; |
| 1790 | |
| 1791 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
| 1792 | struct acpi_nfit_flush_address *flush; |
| 1793 | unsigned long flags = 0, cmd_mask; |
| 1794 | struct nfit_memdev *nfit_memdev; |
| 1795 | u32 device_handle; |
| 1796 | u16 mem_flags; |
| 1797 | |
| 1798 | device_handle = __to_nfit_memdev(nfit_mem)->device_handle; |
| 1799 | nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); |
| 1800 | if (nvdimm) { |
| 1801 | dimm_count++; |
| 1802 | continue; |
| 1803 | } |
| 1804 | |
| 1805 | if (nfit_mem->bdw && nfit_mem->memdev_pmem) |
| 1806 | set_bit(NDD_ALIASING, &flags); |
| 1807 | |
| 1808 | /* collate flags across all memdevs for this dimm */ |
| 1809 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
| 1810 | struct acpi_nfit_memory_map *dimm_memdev; |
| 1811 | |
| 1812 | dimm_memdev = __to_nfit_memdev(nfit_mem); |
| 1813 | if (dimm_memdev->device_handle |
| 1814 | != nfit_memdev->memdev->device_handle) |
| 1815 | continue; |
| 1816 | dimm_memdev->flags |= nfit_memdev->memdev->flags; |
| 1817 | } |
| 1818 | |
| 1819 | mem_flags = __to_nfit_memdev(nfit_mem)->flags; |
| 1820 | if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) |
| 1821 | set_bit(NDD_UNARMED, &flags); |
| 1822 | |
| 1823 | rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); |
| 1824 | if (rc) |
| 1825 | continue; |
| 1826 | |
| 1827 | /* |
| 1828 | * TODO: provide translation for non-NVDIMM_FAMILY_INTEL |
| 1829 | * devices (i.e. from nd_cmd to acpi_dsm) to standardize the |
| 1830 | * userspace interface. |
| 1831 | */ |
| 1832 | cmd_mask = 1UL << ND_CMD_CALL; |
| 1833 | if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { |
| 1834 | /* |
| 1835 | * These commands have a 1:1 correspondence |
| 1836 | * between DSM payload and libnvdimm ioctl |
| 1837 | * payload format. |
| 1838 | */ |
| 1839 | cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; |
| 1840 | } |
| 1841 | |
| 1842 | if (nfit_mem->has_lsi) |
| 1843 | set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); |
| 1844 | if (nfit_mem->has_lsr) |
| 1845 | set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); |
| 1846 | if (nfit_mem->has_lsw) |
| 1847 | set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); |
| 1848 | |
| 1849 | flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush |
| 1850 | : NULL; |
| 1851 | nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, |
| 1852 | acpi_nfit_dimm_attribute_groups, |
| 1853 | flags, cmd_mask, flush ? flush->hint_count : 0, |
| 1854 | nfit_mem->flush_wpq); |
| 1855 | if (!nvdimm) |
| 1856 | return -ENOMEM; |
| 1857 | |
| 1858 | nfit_mem->nvdimm = nvdimm; |
| 1859 | dimm_count++; |
| 1860 | |
| 1861 | if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) |
| 1862 | continue; |
| 1863 | |
| 1864 | dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", |
| 1865 | nvdimm_name(nvdimm), |
| 1866 | mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", |
| 1867 | mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", |
| 1868 | mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", |
| 1869 | mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "", |
| 1870 | mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : ""); |
| 1871 | |
| 1872 | } |
| 1873 | |
| 1874 | rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); |
| 1875 | if (rc) |
| 1876 | return rc; |
| 1877 | |
| 1878 | /* |
| 1879 | * Now that dimms are successfully registered, and async registration |
| 1880 | * is flushed, attempt to enable event notification. |
| 1881 | */ |
| 1882 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
| 1883 | struct kernfs_node *nfit_kernfs; |
| 1884 | |
| 1885 | nvdimm = nfit_mem->nvdimm; |
| 1886 | if (!nvdimm) |
| 1887 | continue; |
| 1888 | |
| 1889 | nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); |
| 1890 | if (nfit_kernfs) |
| 1891 | nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, |
| 1892 | "flags"); |
| 1893 | sysfs_put(nfit_kernfs); |
| 1894 | if (!nfit_mem->flags_attr) |
| 1895 | dev_warn(acpi_desc->dev, "%s: notifications disabled\n", |
| 1896 | nvdimm_name(nvdimm)); |
| 1897 | } |
| 1898 | |
| 1899 | return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, |
| 1900 | acpi_desc); |
| 1901 | } |
| 1902 | |
| 1903 | /* |
| 1904 | * These constants are private because there are no kernel consumers of |
| 1905 | * these commands. |
| 1906 | */ |
| 1907 | enum nfit_aux_cmds { |
| 1908 | NFIT_CMD_TRANSLATE_SPA = 5, |
| 1909 | NFIT_CMD_ARS_INJECT_SET = 7, |
| 1910 | NFIT_CMD_ARS_INJECT_CLEAR = 8, |
| 1911 | NFIT_CMD_ARS_INJECT_GET = 9, |
| 1912 | }; |
| 1913 | |
| 1914 | static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) |
| 1915 | { |
| 1916 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| 1917 | const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); |
| 1918 | struct acpi_device *adev; |
| 1919 | unsigned long dsm_mask; |
| 1920 | int i; |
| 1921 | |
| 1922 | nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; |
| 1923 | nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; |
| 1924 | adev = to_acpi_dev(acpi_desc); |
| 1925 | if (!adev) |
| 1926 | return; |
| 1927 | |
| 1928 | for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) |
| 1929 | if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) |
| 1930 | set_bit(i, &nd_desc->cmd_mask); |
| 1931 | set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); |
| 1932 | |
| 1933 | dsm_mask = |
| 1934 | (1 << ND_CMD_ARS_CAP) | |
| 1935 | (1 << ND_CMD_ARS_START) | |
| 1936 | (1 << ND_CMD_ARS_STATUS) | |
| 1937 | (1 << ND_CMD_CLEAR_ERROR) | |
| 1938 | (1 << NFIT_CMD_TRANSLATE_SPA) | |
| 1939 | (1 << NFIT_CMD_ARS_INJECT_SET) | |
| 1940 | (1 << NFIT_CMD_ARS_INJECT_CLEAR) | |
| 1941 | (1 << NFIT_CMD_ARS_INJECT_GET); |
| 1942 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) |
| 1943 | if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) |
| 1944 | set_bit(i, &nd_desc->bus_dsm_mask); |
| 1945 | } |
| 1946 | |
| 1947 | static ssize_t range_index_show(struct device *dev, |
| 1948 | struct device_attribute *attr, char *buf) |
| 1949 | { |
| 1950 | struct nd_region *nd_region = to_nd_region(dev); |
| 1951 | struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); |
| 1952 | |
| 1953 | return sprintf(buf, "%d\n", nfit_spa->spa->range_index); |
| 1954 | } |
| 1955 | static DEVICE_ATTR_RO(range_index); |
| 1956 | |
| 1957 | static ssize_t ecc_unit_size_show(struct device *dev, |
| 1958 | struct device_attribute *attr, char *buf) |
| 1959 | { |
| 1960 | struct nd_region *nd_region = to_nd_region(dev); |
| 1961 | struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); |
| 1962 | |
| 1963 | return sprintf(buf, "%d\n", nfit_spa->clear_err_unit); |
| 1964 | } |
| 1965 | static DEVICE_ATTR_RO(ecc_unit_size); |
| 1966 | |
| 1967 | static struct attribute *acpi_nfit_region_attributes[] = { |
| 1968 | &dev_attr_range_index.attr, |
| 1969 | &dev_attr_ecc_unit_size.attr, |
| 1970 | NULL, |
| 1971 | }; |
| 1972 | |
| 1973 | static const struct attribute_group acpi_nfit_region_attribute_group = { |
| 1974 | .name = "nfit", |
| 1975 | .attrs = acpi_nfit_region_attributes, |
| 1976 | }; |
| 1977 | |
| 1978 | static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { |
| 1979 | &nd_region_attribute_group, |
| 1980 | &nd_mapping_attribute_group, |
| 1981 | &nd_device_attribute_group, |
| 1982 | &nd_numa_attribute_group, |
| 1983 | &acpi_nfit_region_attribute_group, |
| 1984 | NULL, |
| 1985 | }; |
| 1986 | |
| 1987 | /* enough info to uniquely specify an interleave set */ |
| 1988 | struct nfit_set_info { |
| 1989 | struct nfit_set_info_map { |
| 1990 | u64 region_offset; |
| 1991 | u32 serial_number; |
| 1992 | u32 pad; |
| 1993 | } mapping[0]; |
| 1994 | }; |
| 1995 | |
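/*
 * The v1.2 layout extends the cookie input with dimm manufacturing
 * identifiers beyond the serial number.
 */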
| 1996 | struct nfit_set_info2 { |
| 1997 | struct nfit_set_info_map2 { |
| 1998 | u64 region_offset; |
| 1999 | u32 serial_number; |
| 2000 | u16 vendor_id; |
| 2001 | u16 manufacturing_date; |
| 2002 | u8 manufacturing_location; |
| 2003 | u8 reserved[31]; |
| 2004 | } mapping[0]; |
| 2005 | }; |
| 2006 | |
| 2007 | static size_t sizeof_nfit_set_info(int num_mappings) |
| 2008 | { |
| 2009 | return sizeof(struct nfit_set_info) |
| 2010 | + num_mappings * sizeof(struct nfit_set_info_map); |
| 2011 | } |
| 2012 | |
| 2013 | static size_t sizeof_nfit_set_info2(int num_mappings) |
| 2014 | { |
| 2015 | return sizeof(struct nfit_set_info2) |
| 2016 | + num_mappings * sizeof(struct nfit_set_info_map2); |
| 2017 | } |
| 2018 | |
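/*
 * Legacy comparator: v1.1 interleave-set cookies were originally
 * generated after a byte-wise memcmp() sort of region_offset, which
 * disagrees with a numeric sort on little-endian hosts. Keep it to
 * compute the "altcookie" for namespaces created with that order.
 */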
| 2019 | static int cmp_map_compat(const void *m0, const void *m1) |
| 2020 | { |
| 2021 | const struct nfit_set_info_map *map0 = m0; |
| 2022 | const struct nfit_set_info_map *map1 = m1; |
| 2023 | |
| 2024 | return memcmp(&map0->region_offset, &map1->region_offset, |
| 2025 | sizeof(u64)); |
| 2026 | } |
| 2027 | |
| 2028 | static int cmp_map(const void *m0, const void *m1) |
| 2029 | { |
| 2030 | const struct nfit_set_info_map *map0 = m0; |
| 2031 | const struct nfit_set_info_map *map1 = m1; |
| 2032 | |
| 2033 | if (map0->region_offset < map1->region_offset) |
| 2034 | return -1; |
| 2035 | else if (map0->region_offset > map1->region_offset) |
| 2036 | return 1; |
| 2037 | return 0; |
| 2038 | } |
| 2039 | |
| 2040 | static int cmp_map2(const void *m0, const void *m1) |
| 2041 | { |
| 2042 | const struct nfit_set_info_map2 *map0 = m0; |
| 2043 | const struct nfit_set_info_map2 *map1 = m1; |
| 2044 | |
| 2045 | if (map0->region_offset < map1->region_offset) |
| 2046 | return -1; |
| 2047 | else if (map0->region_offset > map1->region_offset) |
| 2048 | return 1; |
| 2049 | return 0; |
| 2050 | } |
| 2051 | |
| 2052 | /* Retrieve the nth entry referencing this spa */ |
| 2053 | static struct acpi_nfit_memory_map *memdev_from_spa( |
| 2054 | struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) |
| 2055 | { |
| 2056 | struct nfit_memdev *nfit_memdev; |
| 2057 | |
| 2058 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) |
| 2059 | if (nfit_memdev->memdev->range_index == range_index) |
| 2060 | if (n-- == 0) |
| 2061 | return nfit_memdev->memdev; |
| 2062 | return NULL; |
| 2063 | } |
| 2064 | |
| 2065 | static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, |
| 2066 | struct nd_region_desc *ndr_desc, |
| 2067 | struct acpi_nfit_system_address *spa) |
| 2068 | { |
| 2069 | struct device *dev = acpi_desc->dev; |
| 2070 | struct nd_interleave_set *nd_set; |
| 2071 | u16 nr = ndr_desc->num_mappings; |
| 2072 | struct nfit_set_info2 *info2; |
| 2073 | struct nfit_set_info *info; |
| 2074 | int i; |
| 2075 | |
| 2076 | nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); |
| 2077 | if (!nd_set) |
| 2078 | return -ENOMEM; |
| 2079 | ndr_desc->nd_set = nd_set; |
| 2080 | guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); |
| 2081 | |
| 2082 | info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); |
| 2083 | if (!info) |
| 2084 | return -ENOMEM; |
| 2085 | |
| 2086 | info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); |
| 2087 | if (!info2) |
| 2088 | return -ENOMEM; |
| 2089 | |
| 2090 | for (i = 0; i < nr; i++) { |
| 2091 | struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; |
| 2092 | struct nfit_set_info_map *map = &info->mapping[i]; |
| 2093 | struct nfit_set_info_map2 *map2 = &info2->mapping[i]; |
| 2094 | struct nvdimm *nvdimm = mapping->nvdimm; |
| 2095 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 2096 | struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, |
| 2097 | spa->range_index, i); |
| 2098 | struct acpi_nfit_control_region *dcr = nfit_mem->dcr; |
| 2099 | |
| 2100 | if (!memdev || !nfit_mem->dcr) { |
| 2101 | dev_err(dev, "%s: failed to find DCR\n", __func__); |
| 2102 | return -ENODEV; |
| 2103 | } |
| 2104 | |
| 2105 | map->region_offset = memdev->region_offset; |
| 2106 | map->serial_number = dcr->serial_number; |
| 2107 | |
| 2108 | map2->region_offset = memdev->region_offset; |
| 2109 | map2->serial_number = dcr->serial_number; |
| 2110 | map2->vendor_id = dcr->vendor_id; |
| 2111 | map2->manufacturing_date = dcr->manufacturing_date; |
| 2112 | map2->manufacturing_location = dcr->manufacturing_location; |
| 2113 | } |
| 2114 | |
| 2115 | /* v1.1 namespaces */ |
| 2116 | sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), |
| 2117 | cmp_map, NULL); |
| 2118 | nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); |
| 2119 | |
| 2120 | /* v1.2 namespaces */ |
| 2121 | sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), |
| 2122 | cmp_map2, NULL); |
| 2123 | nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); |
| 2124 | |
| 2125 | /* support v1.1 namespaces created with the wrong sort order */ |
| 2126 | sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), |
| 2127 | cmp_map_compat, NULL); |
| 2128 | nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); |
| 2129 | |
| 2130 | /* record the result of the sort for the mapping position */ |
| 2131 | for (i = 0; i < nr; i++) { |
| 2132 | struct nfit_set_info_map2 *map2 = &info2->mapping[i]; |
| 2133 | int j; |
| 2134 | |
| 2135 | for (j = 0; j < nr; j++) { |
| 2136 | struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; |
| 2137 | struct nvdimm *nvdimm = mapping->nvdimm; |
| 2138 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 2139 | struct acpi_nfit_control_region *dcr = nfit_mem->dcr; |
| 2140 | |
| 2141 | if (map2->serial_number == dcr->serial_number && |
| 2142 | map2->vendor_id == dcr->vendor_id && |
| 2143 | map2->manufacturing_date == dcr->manufacturing_date && |
| 2144 | map2->manufacturing_location |
| 2145 | == dcr->manufacturing_location) { |
| 2146 | mapping->position = i; |
| 2147 | break; |
| 2148 | } |
| 2149 | } |
| 2150 | } |
| 2151 | |
| 2153 | devm_kfree(dev, info); |
| 2154 | devm_kfree(dev, info2); |
| 2155 | |
| 2156 | return 0; |
| 2157 | } |
| 2158 | |
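/*
 * Translate a linear aperture offset into its interleaved offset. As a
 * worked example (hypothetical geometry): with line_size = 256,
 * num_lines = 2 and table_size = 8192, offset = 600 yields line_no = 2,
 * sub_line_offset = 88, line_index = 0 and table_skip_count = 1, so the
 * result is base_offset + idt->line_offset[0] * 256 + 8192 + 88.
 */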
| 2159 | static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) |
| 2160 | { |
| 2161 | struct acpi_nfit_interleave *idt = mmio->idt; |
| 2162 | u32 sub_line_offset, line_index, line_offset; |
| 2163 | u64 line_no, table_skip_count, table_offset; |
| 2164 | |
| 2165 | line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); |
| 2166 | table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); |
| 2167 | line_offset = idt->line_offset[line_index] |
| 2168 | * mmio->line_size; |
| 2169 | table_offset = table_skip_count * mmio->table_size; |
| 2170 | |
| 2171 | return mmio->base_offset + line_offset + table_offset + sub_line_offset; |
| 2172 | } |
| 2173 | |
| 2174 | static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) |
| 2175 | { |
| 2176 | struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; |
| 2177 | u64 offset = nfit_blk->stat_offset + mmio->size * bw; |
| 2178 | const u32 STATUS_MASK = 0x80000037; |
| 2179 | |
| 2180 | if (mmio->num_lines) |
| 2181 | offset = to_interleave_offset(offset, mmio); |
| 2182 | |
| 2183 | return readl(mmio->addr.base + offset) & STATUS_MASK; |
| 2184 | } |
| 2185 | |
| 2186 | static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, |
| 2187 | resource_size_t dpa, unsigned int len, unsigned int write) |
| 2188 | { |
| 2189 | u64 cmd, offset; |
| 2190 | struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; |
| 2191 | |
| 2192 | enum { |
| 2193 | BCW_OFFSET_MASK = (1ULL << 48)-1, |
| 2194 | BCW_LEN_SHIFT = 48, |
| 2195 | BCW_LEN_MASK = (1ULL << 8) - 1, |
| 2196 | BCW_CMD_SHIFT = 56, |
| 2197 | }; |
| 2198 | |
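	/*
	 * Pack the block command word: the dpa in cache-line units in
	 * bits 0-47, the transfer length in cache lines in bits 48-55,
	 * and the write flag in bit 56. For example, a 256-byte write at
	 * dpa 0x1000 with 64-byte cache lines encodes as
	 * (1ULL << 56) | (4ULL << 48) | 0x40.
	 */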
| 2199 | cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; |
| 2200 | len = len >> L1_CACHE_SHIFT; |
| 2201 | cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; |
| 2202 | cmd |= ((u64) write) << BCW_CMD_SHIFT; |
| 2203 | |
| 2204 | offset = nfit_blk->cmd_offset + mmio->size * bw; |
| 2205 | if (mmio->num_lines) |
| 2206 | offset = to_interleave_offset(offset, mmio); |
| 2207 | |
| 2208 | writeq(cmd, mmio->addr.base + offset); |
| 2209 | nvdimm_flush(nfit_blk->nd_region); |
| 2210 | |
| 2211 | if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) |
| 2212 | readq(mmio->addr.base + offset); |
| 2213 | } |
| 2214 | |
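/*
 * Perform one lane's worth of I/O through the block data window:
 * program the block control register for @dpa / @len, then copy at
 * most one interleave line per iteration so no transfer straddles a
 * line boundary, and finally check the status register.
 */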
| 2215 | static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, |
| 2216 | resource_size_t dpa, void *iobuf, size_t len, int rw, |
| 2217 | unsigned int lane) |
| 2218 | { |
| 2219 | struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; |
| 2220 | unsigned int copied = 0; |
| 2221 | u64 base_offset; |
| 2222 | int rc; |
| 2223 | |
| 2224 | base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES |
| 2225 | + lane * mmio->size; |
| 2226 | write_blk_ctl(nfit_blk, lane, dpa, len, rw); |
| 2227 | while (len) { |
| 2228 | unsigned int c; |
| 2229 | u64 offset; |
| 2230 | |
| 2231 | if (mmio->num_lines) { |
| 2232 | u32 line_offset; |
| 2233 | |
| 2234 | offset = to_interleave_offset(base_offset + copied, |
| 2235 | mmio); |
| 2236 | div_u64_rem(offset, mmio->line_size, &line_offset); |
| 2237 | c = min_t(size_t, len, mmio->line_size - line_offset); |
| 2238 | } else { |
| 2239 | offset = base_offset + nfit_blk->bdw_offset; |
| 2240 | c = len; |
| 2241 | } |
| 2242 | |
| 2243 | if (rw) |
| 2244 | memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); |
| 2245 | else { |
| 2246 | if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) |
| 2247 | arch_invalidate_pmem((void __force *) |
| 2248 | mmio->addr.aperture + offset, c); |
| 2249 | |
| 2250 | memcpy(iobuf + copied, mmio->addr.aperture + offset, c); |
| 2251 | } |
| 2252 | |
| 2253 | copied += c; |
| 2254 | len -= c; |
| 2255 | } |
| 2256 | |
| 2257 | if (rw) |
| 2258 | nvdimm_flush(nfit_blk->nd_region); |
| 2259 | |
| 2260 | rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; |
| 2261 | return rc; |
| 2262 | } |
| 2263 | |
| 2264 | static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, |
| 2265 | resource_size_t dpa, void *iobuf, u64 len, int rw) |
| 2266 | { |
| 2267 | struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); |
| 2268 | struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; |
| 2269 | struct nd_region *nd_region = nfit_blk->nd_region; |
| 2270 | unsigned int lane, copied = 0; |
| 2271 | int rc = 0; |
| 2272 | |
| 2273 | lane = nd_region_acquire_lane(nd_region); |
| 2274 | while (len) { |
| 2275 | u64 c = min(len, mmio->size); |
| 2276 | |
| 2277 | rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, |
| 2278 | iobuf + copied, c, rw, lane); |
| 2279 | if (rc) |
| 2280 | break; |
| 2281 | |
| 2282 | copied += c; |
| 2283 | len -= c; |
| 2284 | } |
| 2285 | nd_region_release_lane(nd_region, lane); |
| 2286 | |
| 2287 | return rc; |
| 2288 | } |
| 2289 | |
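/*
 * Cache the interleave geometry for an mmio window. table_size spans
 * one full rotation through every line of every interleave way, which
 * lets to_interleave_offset() skip whole tables at a time.
 */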
| 2290 | static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, |
| 2291 | struct acpi_nfit_interleave *idt, u16 interleave_ways) |
| 2292 | { |
| 2293 | if (idt) { |
| 2294 | mmio->num_lines = idt->line_count; |
| 2295 | mmio->line_size = idt->line_size; |
| 2296 | if (interleave_ways == 0) |
| 2297 | return -ENXIO; |
| 2298 | mmio->table_size = mmio->num_lines * interleave_ways |
| 2299 | * mmio->line_size; |
| 2300 | } |
| 2301 | |
| 2302 | return 0; |
| 2303 | } |
| 2304 | |
| 2305 | static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, |
| 2306 | struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) |
| 2307 | { |
| 2308 | struct nd_cmd_dimm_flags flags; |
| 2309 | int rc; |
| 2310 | |
| 2311 | memset(&flags, 0, sizeof(flags)); |
| 2312 | rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, |
| 2313 | sizeof(flags), NULL); |
| 2314 | |
| 2315 | if (rc >= 0 && flags.status == 0) |
| 2316 | nfit_blk->dimm_flags = flags.flags; |
| 2317 | else if (rc == -ENOTTY) { |
| 2318 | /* fall back to a conservative default */ |
| 2319 | nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; |
| 2320 | rc = 0; |
| 2321 | } else |
| 2322 | rc = -ENXIO; |
| 2323 | |
| 2324 | return rc; |
| 2325 | } |
| 2326 | |
| 2327 | static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, |
| 2328 | struct device *dev) |
| 2329 | { |
| 2330 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
| 2331 | struct nd_blk_region *ndbr = to_nd_blk_region(dev); |
| 2332 | struct nfit_blk_mmio *mmio; |
| 2333 | struct nfit_blk *nfit_blk; |
| 2334 | struct nfit_mem *nfit_mem; |
| 2335 | struct nvdimm *nvdimm; |
| 2336 | int rc; |
| 2337 | |
| 2338 | nvdimm = nd_blk_region_to_dimm(ndbr); |
| 2339 | nfit_mem = nvdimm_provider_data(nvdimm); |
| 2340 | if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { |
| 2341 | dev_dbg(dev, "%s: missing%s%s%s\n", __func__, |
| 2342 | nfit_mem ? "" : " nfit_mem", |
| 2343 | (nfit_mem && nfit_mem->dcr) ? "" : " dcr", |
| 2344 | (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); |
| 2345 | return -ENXIO; |
| 2346 | } |
| 2347 | |
| 2348 | nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); |
| 2349 | if (!nfit_blk) |
| 2350 | return -ENOMEM; |
| 2351 | nd_blk_region_set_provider_data(ndbr, nfit_blk); |
| 2352 | nfit_blk->nd_region = to_nd_region(dev); |
| 2353 | |
| 2354 | /* map block aperture memory */ |
| 2355 | nfit_blk->bdw_offset = nfit_mem->bdw->offset; |
| 2356 | mmio = &nfit_blk->mmio[BDW]; |
| 2357 | mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, |
| 2358 | nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); |
| 2359 | if (!mmio->addr.base) { |
| 2360 | dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, |
| 2361 | nvdimm_name(nvdimm)); |
| 2362 | return -ENOMEM; |
| 2363 | } |
| 2364 | mmio->size = nfit_mem->bdw->size; |
| 2365 | mmio->base_offset = nfit_mem->memdev_bdw->region_offset; |
| 2366 | mmio->idt = nfit_mem->idt_bdw; |
| 2367 | mmio->spa = nfit_mem->spa_bdw; |
| 2368 | rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, |
| 2369 | nfit_mem->memdev_bdw->interleave_ways); |
| 2370 | if (rc) { |
| 2371 | dev_dbg(dev, "%s: %s failed to init bdw interleave\n", |
| 2372 | __func__, nvdimm_name(nvdimm)); |
| 2373 | return rc; |
| 2374 | } |
| 2375 | |
| 2376 | /* map block control memory */ |
| 2377 | nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; |
| 2378 | nfit_blk->stat_offset = nfit_mem->dcr->status_offset; |
| 2379 | mmio = &nfit_blk->mmio[DCR]; |
| 2380 | mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, |
| 2381 | nfit_mem->spa_dcr->length); |
| 2382 | if (!mmio->addr.base) { |
| 2383 | dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, |
| 2384 | nvdimm_name(nvdimm)); |
| 2385 | return -ENOMEM; |
| 2386 | } |
| 2387 | mmio->size = nfit_mem->dcr->window_size; |
| 2388 | mmio->base_offset = nfit_mem->memdev_dcr->region_offset; |
| 2389 | mmio->idt = nfit_mem->idt_dcr; |
| 2390 | mmio->spa = nfit_mem->spa_dcr; |
| 2391 | rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, |
| 2392 | nfit_mem->memdev_dcr->interleave_ways); |
| 2393 | if (rc) { |
| 2394 | dev_dbg(dev, "%s: %s failed to init dcr interleave\n", |
| 2395 | __func__, nvdimm_name(nvdimm)); |
| 2396 | return rc; |
| 2397 | } |
| 2398 | |
| 2399 | rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); |
| 2400 | if (rc < 0) { |
| 2401 | dev_dbg(dev, "%s: %s failed get DIMM flags\n", |
| 2402 | __func__, nvdimm_name(nvdimm)); |
| 2403 | return rc; |
| 2404 | } |
| 2405 | |
| 2406 | if (nvdimm_has_flush(nfit_blk->nd_region) < 0) |
| 2407 | dev_warn(dev, "unable to guarantee persistence of writes\n"); |
| 2408 | |
| 2409 | if (mmio->line_size == 0) |
| 2410 | return 0; |
| 2411 | |
| 2412 | if ((u32) nfit_blk->cmd_offset % mmio->line_size |
| 2413 | + 8 > mmio->line_size) { |
| 2414 | dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); |
| 2415 | return -ENXIO; |
| 2416 | } else if ((u32) nfit_blk->stat_offset % mmio->line_size |
| 2417 | + 8 > mmio->line_size) { |
| 2418 | dev_dbg(dev, "stat_offset crosses interleave boundary\n"); |
| 2419 | return -ENXIO; |
| 2420 | } |
| 2421 | |
| 2422 | return 0; |
| 2423 | } |
| 2424 | |
| 2425 | static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, |
| 2426 | struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) |
| 2427 | { |
| 2428 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| 2429 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 2430 | int cmd_rc, rc; |
| 2431 | |
| 2432 | cmd->address = spa->address; |
| 2433 | cmd->length = spa->length; |
| 2434 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, |
| 2435 | sizeof(*cmd), &cmd_rc); |
| 2436 | if (rc < 0) |
| 2437 | return rc; |
| 2438 | return cmd_rc; |
| 2439 | } |
| 2440 | |
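/*
 * Kick off a scrub of the full SPA range. The scrub type mirrors the
 * SPA type; ranges that are neither persistent nor volatile memory
 * cannot be scrubbed.
 */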
| 2441 | static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) |
| 2442 | { |
| 2443 | int rc; |
| 2444 | int cmd_rc; |
| 2445 | struct nd_cmd_ars_start ars_start; |
| 2446 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 2447 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| 2448 | |
| 2449 | memset(&ars_start, 0, sizeof(ars_start)); |
| 2450 | ars_start.address = spa->address; |
| 2451 | ars_start.length = spa->length; |
| 2452 | ars_start.flags = acpi_desc->ars_start_flags; |
| 2453 | if (nfit_spa_type(spa) == NFIT_SPA_PM) |
| 2454 | ars_start.type = ND_ARS_PERSISTENT; |
| 2455 | else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) |
| 2456 | ars_start.type = ND_ARS_VOLATILE; |
| 2457 | else |
| 2458 | return -ENOTTY; |
| 2459 | |
| 2460 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, |
| 2461 | sizeof(ars_start), &cmd_rc); |
| 2462 | |
| 2463 | if (rc < 0) |
| 2464 | return rc; |
| 2465 | return cmd_rc; |
| 2466 | } |
| 2467 | |
| 2468 | static int ars_continue(struct acpi_nfit_desc *acpi_desc) |
| 2469 | { |
| 2470 | int rc, cmd_rc; |
| 2471 | struct nd_cmd_ars_start ars_start; |
| 2472 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| 2473 | struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; |
| 2474 | |
| 2475 | memset(&ars_start, 0, sizeof(ars_start)); |
| 2476 | ars_start.address = ars_status->restart_address; |
| 2477 | ars_start.length = ars_status->restart_length; |
| 2478 | ars_start.type = ars_status->type; |
| 2479 | ars_start.flags = acpi_desc->ars_start_flags; |
| 2480 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, |
| 2481 | sizeof(ars_start), &cmd_rc); |
| 2482 | if (rc < 0) |
| 2483 | return rc; |
| 2484 | return cmd_rc; |
| 2485 | } |
| 2486 | |
| 2487 | static int ars_get_status(struct acpi_nfit_desc *acpi_desc) |
| 2488 | { |
| 2489 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| 2490 | struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; |
| 2491 | int rc, cmd_rc; |
| 2492 | |
| 2493 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, |
| 2494 | acpi_desc->ars_status_size, &cmd_rc); |
| 2495 | if (rc < 0) |
| 2496 | return rc; |
| 2497 | return cmd_rc; |
| 2498 | } |
| 2499 | |
| 2500 | static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc, |
| 2501 | struct nd_cmd_ars_status *ars_status) |
| 2502 | { |
| 2503 | struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; |
| 2504 | int rc; |
| 2505 | u32 i; |
| 2506 | |
| 2507 | /* |
	 * The first record starts at a 44 byte offset from the start of
	 * the payload.
| 2510 | */ |
| 2511 | if (ars_status->out_length < 44) |
| 2512 | return 0; |
| 2513 | for (i = 0; i < ars_status->num_records; i++) { |
| 2514 | /* only process full records */ |
| 2515 | if (ars_status->out_length |
| 2516 | < 44 + sizeof(struct nd_ars_record) * (i + 1)) |
| 2517 | break; |
| 2518 | rc = nvdimm_bus_add_badrange(nvdimm_bus, |
| 2519 | ars_status->records[i].err_address, |
| 2520 | ars_status->records[i].length); |
| 2521 | if (rc) |
| 2522 | return rc; |
| 2523 | } |
| 2524 | if (i < ars_status->num_records) |
| 2525 | dev_warn(acpi_desc->dev, "detected truncated ars results\n"); |
| 2526 | |
| 2527 | return 0; |
| 2528 | } |
| 2529 | |
| 2530 | static void acpi_nfit_remove_resource(void *data) |
| 2531 | { |
| 2532 | struct resource *res = data; |
| 2533 | |
| 2534 | remove_resource(res); |
| 2535 | } |
| 2536 | |
| 2537 | static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, |
| 2538 | struct nd_region_desc *ndr_desc) |
| 2539 | { |
| 2540 | struct resource *res, *nd_res = ndr_desc->res; |
| 2541 | int is_pmem, ret; |
| 2542 | |
| 2543 | /* No operation if the region is already registered as PMEM */ |
| 2544 | is_pmem = region_intersects(nd_res->start, resource_size(nd_res), |
| 2545 | IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); |
| 2546 | if (is_pmem == REGION_INTERSECTS) |
| 2547 | return 0; |
| 2548 | |
| 2549 | res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); |
| 2550 | if (!res) |
| 2551 | return -ENOMEM; |
| 2552 | |
| 2553 | res->name = "Persistent Memory"; |
| 2554 | res->start = nd_res->start; |
| 2555 | res->end = nd_res->end; |
| 2556 | res->flags = IORESOURCE_MEM; |
| 2557 | res->desc = IORES_DESC_PERSISTENT_MEMORY; |
| 2558 | |
| 2559 | ret = insert_resource(&iomem_resource, res); |
| 2560 | if (ret) |
| 2561 | return ret; |
| 2562 | |
| 2563 | ret = devm_add_action_or_reset(acpi_desc->dev, |
| 2564 | acpi_nfit_remove_resource, |
| 2565 | res); |
| 2566 | if (ret) |
| 2567 | return ret; |
| 2568 | |
| 2569 | return 0; |
| 2570 | } |
| 2571 | |
| 2572 | static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, |
| 2573 | struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, |
| 2574 | struct acpi_nfit_memory_map *memdev, |
| 2575 | struct nfit_spa *nfit_spa) |
| 2576 | { |
| 2577 | struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, |
| 2578 | memdev->device_handle); |
| 2579 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 2580 | struct nd_blk_region_desc *ndbr_desc; |
| 2581 | struct nfit_mem *nfit_mem; |
| 2582 | int blk_valid = 0, rc; |
| 2583 | |
| 2584 | if (!nvdimm) { |
| 2585 | dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", |
| 2586 | spa->range_index, memdev->device_handle); |
| 2587 | return -ENODEV; |
| 2588 | } |
| 2589 | |
| 2590 | mapping->nvdimm = nvdimm; |
| 2591 | switch (nfit_spa_type(spa)) { |
| 2592 | case NFIT_SPA_PM: |
| 2593 | case NFIT_SPA_VOLATILE: |
| 2594 | mapping->start = memdev->address; |
| 2595 | mapping->size = memdev->region_size; |
| 2596 | break; |
| 2597 | case NFIT_SPA_DCR: |
| 2598 | nfit_mem = nvdimm_provider_data(nvdimm); |
| 2599 | if (!nfit_mem || !nfit_mem->bdw) { |
| 2600 | dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", |
| 2601 | spa->range_index, nvdimm_name(nvdimm)); |
| 2602 | } else { |
| 2603 | mapping->size = nfit_mem->bdw->capacity; |
| 2604 | mapping->start = nfit_mem->bdw->start_address; |
| 2605 | ndr_desc->num_lanes = nfit_mem->bdw->windows; |
| 2606 | blk_valid = 1; |
| 2607 | } |
| 2608 | |
| 2609 | ndr_desc->mapping = mapping; |
| 2610 | ndr_desc->num_mappings = blk_valid; |
| 2611 | ndbr_desc = to_blk_region_desc(ndr_desc); |
| 2612 | ndbr_desc->enable = acpi_nfit_blk_region_enable; |
| 2613 | ndbr_desc->do_io = acpi_desc->blk_do_io; |
| 2614 | rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); |
| 2615 | if (rc) |
| 2616 | return rc; |
| 2617 | nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, |
| 2618 | ndr_desc); |
| 2619 | if (!nfit_spa->nd_region) |
| 2620 | return -ENOMEM; |
| 2621 | break; |
| 2622 | } |
| 2623 | |
| 2624 | return 0; |
| 2625 | } |
| 2626 | |
| 2627 | static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) |
| 2628 | { |
| 2629 | return (nfit_spa_type(spa) == NFIT_SPA_VDISK || |
| 2630 | nfit_spa_type(spa) == NFIT_SPA_VCD || |
| 2631 | nfit_spa_type(spa) == NFIT_SPA_PDISK || |
| 2632 | nfit_spa_type(spa) == NFIT_SPA_PCD); |
| 2633 | } |
| 2634 | |
| 2635 | static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) |
| 2636 | { |
| 2637 | return (nfit_spa_type(spa) == NFIT_SPA_VDISK || |
| 2638 | nfit_spa_type(spa) == NFIT_SPA_VCD || |
| 2639 | nfit_spa_type(spa) == NFIT_SPA_VOLATILE); |
| 2640 | } |
| 2641 | |
| 2642 | static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, |
| 2643 | struct nfit_spa *nfit_spa) |
| 2644 | { |
| 2645 | static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; |
| 2646 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 2647 | struct nd_blk_region_desc ndbr_desc; |
| 2648 | struct nd_region_desc *ndr_desc; |
| 2649 | struct nfit_memdev *nfit_memdev; |
| 2650 | struct nvdimm_bus *nvdimm_bus; |
| 2651 | struct resource res; |
| 2652 | int count = 0, rc; |
| 2653 | |
| 2654 | if (nfit_spa->nd_region) |
| 2655 | return 0; |
| 2656 | |
| 2657 | if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { |
| 2658 | dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", |
| 2659 | __func__); |
| 2660 | return 0; |
| 2661 | } |
| 2662 | |
| 2663 | memset(&res, 0, sizeof(res)); |
| 2664 | memset(&mappings, 0, sizeof(mappings)); |
| 2665 | memset(&ndbr_desc, 0, sizeof(ndbr_desc)); |
| 2666 | res.start = spa->address; |
| 2667 | res.end = res.start + spa->length - 1; |
| 2668 | ndr_desc = &ndbr_desc.ndr_desc; |
| 2669 | ndr_desc->res = &res; |
| 2670 | ndr_desc->provider_data = nfit_spa; |
| 2671 | ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; |
| 2672 | if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) |
| 2673 | ndr_desc->numa_node = acpi_map_pxm_to_online_node( |
| 2674 | spa->proximity_domain); |
| 2675 | else |
| 2676 | ndr_desc->numa_node = NUMA_NO_NODE; |
| 2677 | |
| 2678 | /* |
| 2679 | * Persistence domain bits are hierarchical, if |
| 2680 | * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then |
| 2681 | * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. |
| 2682 | */ |
| 2683 | if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) |
| 2684 | set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); |
| 2685 | else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) |
| 2686 | set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); |
| 2687 | |
| 2688 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
| 2689 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; |
| 2690 | struct nd_mapping_desc *mapping; |
| 2691 | |
| 2692 | if (memdev->range_index != spa->range_index) |
| 2693 | continue; |
| 2694 | if (count >= ND_MAX_MAPPINGS) { |
| 2695 | dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", |
| 2696 | spa->range_index, ND_MAX_MAPPINGS); |
| 2697 | return -ENXIO; |
| 2698 | } |
| 2699 | mapping = &mappings[count++]; |
| 2700 | rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, |
| 2701 | memdev, nfit_spa); |
| 2702 | if (rc) |
| 2703 | goto out; |
| 2704 | } |
| 2705 | |
| 2706 | ndr_desc->mapping = mappings; |
| 2707 | ndr_desc->num_mappings = count; |
| 2708 | rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); |
| 2709 | if (rc) |
| 2710 | goto out; |
| 2711 | |
| 2712 | nvdimm_bus = acpi_desc->nvdimm_bus; |
| 2713 | if (nfit_spa_type(spa) == NFIT_SPA_PM) { |
| 2714 | rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); |
| 2715 | if (rc) { |
| 2716 | dev_warn(acpi_desc->dev, |
| 2717 | "failed to insert pmem resource to iomem: %d\n", |
| 2718 | rc); |
| 2719 | goto out; |
| 2720 | } |
| 2721 | |
| 2722 | nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, |
| 2723 | ndr_desc); |
| 2724 | if (!nfit_spa->nd_region) |
| 2725 | rc = -ENOMEM; |
| 2726 | } else if (nfit_spa_is_volatile(spa)) { |
| 2727 | nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, |
| 2728 | ndr_desc); |
| 2729 | if (!nfit_spa->nd_region) |
| 2730 | rc = -ENOMEM; |
| 2731 | } else if (nfit_spa_is_virtual(spa)) { |
| 2732 | nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, |
| 2733 | ndr_desc); |
| 2734 | if (!nfit_spa->nd_region) |
| 2735 | rc = -ENOMEM; |
| 2736 | } |
| 2737 | |
| 2738 | out: |
| 2739 | if (rc) |
| 2740 | dev_err(acpi_desc->dev, "failed to register spa range %d\n", |
| 2741 | nfit_spa->spa->range_index); |
| 2742 | return rc; |
| 2743 | } |
| 2744 | |
| 2745 | static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc, |
| 2746 | u32 max_ars) |
| 2747 | { |
| 2748 | struct device *dev = acpi_desc->dev; |
| 2749 | struct nd_cmd_ars_status *ars_status; |
| 2750 | |
| 2751 | if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) { |
| 2752 | memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size); |
| 2753 | return 0; |
| 2754 | } |
| 2755 | |
| 2756 | if (acpi_desc->ars_status) |
| 2757 | devm_kfree(dev, acpi_desc->ars_status); |
| 2758 | acpi_desc->ars_status = NULL; |
| 2759 | ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL); |
| 2760 | if (!ars_status) |
| 2761 | return -ENOMEM; |
| 2762 | acpi_desc->ars_status = ars_status; |
| 2763 | acpi_desc->ars_status_size = max_ars; |
| 2764 | return 0; |
| 2765 | } |
| 2766 | |
| 2767 | static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc, |
| 2768 | struct nfit_spa *nfit_spa) |
| 2769 | { |
| 2770 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 2771 | int rc; |
| 2772 | |
| 2773 | if (!nfit_spa->max_ars) { |
| 2774 | struct nd_cmd_ars_cap ars_cap; |
| 2775 | |
| 2776 | memset(&ars_cap, 0, sizeof(ars_cap)); |
| 2777 | rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); |
| 2778 | if (rc < 0) |
| 2779 | return rc; |
| 2780 | nfit_spa->max_ars = ars_cap.max_ars_out; |
| 2781 | nfit_spa->clear_err_unit = ars_cap.clear_err_unit; |
| 2782 | /* check that the supported scrub types match the spa type */ |
| 2783 | if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE && |
| 2784 | ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0) |
| 2785 | return -ENOTTY; |
| 2786 | else if (nfit_spa_type(spa) == NFIT_SPA_PM && |
| 2787 | ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0) |
| 2788 | return -ENOTTY; |
| 2789 | } |
| 2790 | |
| 2791 | if (ars_status_alloc(acpi_desc, nfit_spa->max_ars)) |
| 2792 | return -ENOMEM; |
| 2793 | |
| 2794 | rc = ars_get_status(acpi_desc); |
| 2795 | if (rc < 0 && rc != -ENOSPC) |
| 2796 | return rc; |
| 2797 | |
| 2798 | if (ars_status_process_records(acpi_desc, acpi_desc->ars_status)) |
| 2799 | return -ENOMEM; |
| 2800 | |
| 2801 | return 0; |
| 2802 | } |
| 2803 | |
| 2804 | static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, |
| 2805 | struct nfit_spa *nfit_spa) |
| 2806 | { |
| 2807 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 2808 | unsigned int overflow_retry = scrub_overflow_abort; |
| 2809 | u64 init_ars_start = 0, init_ars_len = 0; |
| 2810 | struct device *dev = acpi_desc->dev; |
| 2811 | unsigned int tmo = scrub_timeout; |
| 2812 | int rc; |
| 2813 | |
| 2814 | if (!nfit_spa->ars_required || !nfit_spa->nd_region) |
| 2815 | return; |
| 2816 | |
| 2817 | rc = ars_start(acpi_desc, nfit_spa); |
| 2818 | /* |
| 2819 | * If we timed out the initial scan we'll still be busy here, |
| 2820 | * and will wait another timeout before giving up permanently. |
| 2821 | */ |
| 2822 | if (rc < 0 && rc != -EBUSY) |
| 2823 | return; |
| 2824 | |
| 2825 | do { |
| 2826 | u64 ars_start, ars_len; |
| 2827 | |
| 2828 | if (acpi_desc->cancel) |
| 2829 | break; |
| 2830 | rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); |
| 2831 | if (rc == -ENOTTY) |
| 2832 | break; |
| 2833 | if (rc == -EBUSY && !tmo) { |
| 2834 | dev_warn(dev, "range %d ars timeout, aborting\n", |
| 2835 | spa->range_index); |
| 2836 | break; |
| 2837 | } |
| 2838 | |
| 2839 | if (rc == -EBUSY) { |
| 2840 | /* |
			 * Note that entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries from being
			 * deleted / freed.
| 2845 | */ |
| 2846 | mutex_unlock(&acpi_desc->init_mutex); |
| 2847 | ssleep(1); |
| 2848 | tmo--; |
| 2849 | mutex_lock(&acpi_desc->init_mutex); |
| 2850 | continue; |
| 2851 | } |
| 2852 | |
| 2853 | /* we got some results, but there are more pending... */ |
| 2854 | if (rc == -ENOSPC && overflow_retry--) { |
| 2855 | if (!init_ars_len) { |
| 2856 | init_ars_len = acpi_desc->ars_status->length; |
| 2857 | init_ars_start = acpi_desc->ars_status->address; |
| 2858 | } |
| 2859 | rc = ars_continue(acpi_desc); |
| 2860 | } |
| 2861 | |
| 2862 | if (rc < 0) { |
| 2863 | dev_warn(dev, "range %d ars continuation failed\n", |
| 2864 | spa->range_index); |
| 2865 | break; |
| 2866 | } |
| 2867 | |
| 2868 | if (init_ars_len) { |
| 2869 | ars_start = init_ars_start; |
| 2870 | ars_len = init_ars_len; |
| 2871 | } else { |
| 2872 | ars_start = acpi_desc->ars_status->address; |
| 2873 | ars_len = acpi_desc->ars_status->length; |
| 2874 | } |
| 2875 | dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n", |
| 2876 | spa->range_index, ars_start, ars_len); |
| 2877 | /* notify the region about new poison entries */ |
| 2878 | nvdimm_region_notify(nfit_spa->nd_region, |
| 2879 | NVDIMM_REVALIDATE_POISON); |
| 2880 | break; |
| 2881 | } while (1); |
| 2882 | } |
| 2883 | |
| 2884 | static void acpi_nfit_scrub(struct work_struct *work) |
| 2885 | { |
| 2886 | struct device *dev; |
| 2887 | u64 init_scrub_length = 0; |
| 2888 | struct nfit_spa *nfit_spa; |
| 2889 | u64 init_scrub_address = 0; |
| 2890 | bool init_ars_done = false; |
| 2891 | struct acpi_nfit_desc *acpi_desc; |
| 2892 | unsigned int tmo = scrub_timeout; |
| 2893 | unsigned int overflow_retry = scrub_overflow_abort; |
| 2894 | |
| 2895 | acpi_desc = container_of(work, typeof(*acpi_desc), work); |
| 2896 | dev = acpi_desc->dev; |
| 2897 | |
| 2898 | /* |
| 2899 | * We scrub in 2 phases. The first phase waits for any platform |
| 2900 | * firmware initiated scrubs to complete and then we go search for the |
| 2901 | * affected spa regions to mark them scanned. In the second phase we |
| 2902 | * initiate a directed scrub for every range that was not scrubbed in |
| 2903 | * phase 1. If we're called for a 'rescan', we harmlessly pass through |
| 2904 | * the first phase, but really only care about running phase 2, where |
| 2905 | * regions can be notified of new poison. |
| 2906 | */ |
| 2907 | |
| 2908 | /* process platform firmware initiated scrubs */ |
| 2909 | retry: |
| 2910 | mutex_lock(&acpi_desc->init_mutex); |
| 2911 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
| 2912 | struct nd_cmd_ars_status *ars_status; |
| 2913 | struct acpi_nfit_system_address *spa; |
| 2914 | u64 ars_start, ars_len; |
| 2915 | int rc; |
| 2916 | |
| 2917 | if (acpi_desc->cancel) |
| 2918 | break; |
| 2919 | |
| 2920 | if (nfit_spa->nd_region) |
| 2921 | continue; |
| 2922 | |
| 2923 | if (init_ars_done) { |
| 2924 | /* |
| 2925 | * No need to re-query, we're now just |
| 2926 | * reconciling all the ranges covered by the |
| 2927 | * initial scrub |
| 2928 | */ |
| 2929 | rc = 0; |
| 2930 | } else |
| 2931 | rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); |
| 2932 | |
| 2933 | if (rc == -ENOTTY) { |
| 2934 | /* no ars capability, just register spa and move on */ |
| 2935 | acpi_nfit_register_region(acpi_desc, nfit_spa); |
| 2936 | continue; |
| 2937 | } |
| 2938 | |
| 2939 | if (rc == -EBUSY && !tmo) { |
| 2940 | /* fallthrough to directed scrub in phase 2 */ |
| 2941 | dev_warn(dev, "timeout awaiting ars results, continuing...\n"); |
| 2942 | break; |
| 2943 | } else if (rc == -EBUSY) { |
| 2944 | mutex_unlock(&acpi_desc->init_mutex); |
| 2945 | ssleep(1); |
| 2946 | tmo--; |
| 2947 | goto retry; |
| 2948 | } |
| 2949 | |
| 2950 | /* we got some results, but there are more pending... */ |
| 2951 | if (rc == -ENOSPC && overflow_retry--) { |
| 2952 | ars_status = acpi_desc->ars_status; |
| 2953 | /* |
| 2954 | * Record the original scrub range, so that we |
| 2955 | * can recall all the ranges impacted by the |
| 2956 | * initial scrub. |
| 2957 | */ |
| 2958 | if (!init_scrub_length) { |
| 2959 | init_scrub_length = ars_status->length; |
| 2960 | init_scrub_address = ars_status->address; |
| 2961 | } |
| 2962 | rc = ars_continue(acpi_desc); |
| 2963 | if (rc == 0) { |
| 2964 | mutex_unlock(&acpi_desc->init_mutex); |
| 2965 | goto retry; |
| 2966 | } |
| 2967 | } |
| 2968 | |
| 2969 | if (rc < 0) { |
| 2970 | /* |
| 2971 | * Initial scrub failed, we'll give it one more |
| 2972 | * try below... |
| 2973 | */ |
| 2974 | break; |
| 2975 | } |
| 2976 | |
| 2977 | /* We got some final results, record completed ranges */ |
| 2978 | ars_status = acpi_desc->ars_status; |
| 2979 | if (init_scrub_length) { |
| 2980 | ars_start = init_scrub_address; |
			ars_len = init_scrub_length;
| 2982 | } else { |
| 2983 | ars_start = ars_status->address; |
| 2984 | ars_len = ars_status->length; |
| 2985 | } |
| 2986 | spa = nfit_spa->spa; |
| 2987 | |
| 2988 | if (!init_ars_done) { |
| 2989 | init_ars_done = true; |
| 2990 | dev_dbg(dev, "init scrub %#llx + %#llx complete\n", |
| 2991 | ars_start, ars_len); |
| 2992 | } |
| 2993 | if (ars_start <= spa->address && ars_start + ars_len |
| 2994 | >= spa->address + spa->length) |
| 2995 | acpi_nfit_register_region(acpi_desc, nfit_spa); |
| 2996 | } |
| 2997 | |
| 2998 | /* |
| 2999 | * For all the ranges not covered by an initial scrub we still |
| 3000 | * want to see if there are errors, but it's ok to discover them |
| 3001 | * asynchronously. |
| 3002 | */ |
| 3003 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
| 3004 | /* |
| 3005 | * Flag all the ranges that still need scrubbing, but |
| 3006 | * register them now to make data available. |
| 3007 | */ |
| 3008 | if (!nfit_spa->nd_region) { |
| 3009 | nfit_spa->ars_required = 1; |
| 3010 | acpi_nfit_register_region(acpi_desc, nfit_spa); |
| 3011 | } |
| 3012 | } |
| 3013 | acpi_desc->init_complete = 1; |
| 3014 | |
| 3015 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) |
| 3016 | acpi_nfit_async_scrub(acpi_desc, nfit_spa); |
| 3017 | acpi_desc->scrub_count++; |
| 3018 | acpi_desc->ars_start_flags = 0; |
| 3019 | if (acpi_desc->scrub_count_state) |
| 3020 | sysfs_notify_dirent(acpi_desc->scrub_count_state); |
| 3021 | mutex_unlock(&acpi_desc->init_mutex); |
| 3022 | } |
| 3023 | |
| 3024 | static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) |
| 3025 | { |
| 3026 | struct nfit_spa *nfit_spa; |
| 3027 | int rc; |
| 3028 | |
| 3029 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) |
| 3030 | if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { |
| 3031 | /* BLK regions don't need to wait for ars results */ |
| 3032 | rc = acpi_nfit_register_region(acpi_desc, nfit_spa); |
| 3033 | if (rc) |
| 3034 | return rc; |
| 3035 | } |
| 3036 | |
| 3037 | acpi_desc->ars_start_flags = 0; |
| 3038 | if (!acpi_desc->cancel) |
| 3039 | queue_work(nfit_wq, &acpi_desc->work); |
| 3040 | return 0; |
| 3041 | } |
| 3042 | |
| 3043 | static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, |
| 3044 | struct nfit_table_prev *prev) |
| 3045 | { |
| 3046 | struct device *dev = acpi_desc->dev; |
| 3047 | |
| 3048 | if (!list_empty(&prev->spas) || |
| 3049 | !list_empty(&prev->memdevs) || |
| 3050 | !list_empty(&prev->dcrs) || |
| 3051 | !list_empty(&prev->bdws) || |
| 3052 | !list_empty(&prev->idts) || |
| 3053 | !list_empty(&prev->flushes)) { |
| 3054 | dev_err(dev, "new nfit deletes entries (unsupported)\n"); |
| 3055 | return -ENXIO; |
| 3056 | } |
| 3057 | return 0; |
| 3058 | } |
| 3059 | |
| 3060 | static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) |
| 3061 | { |
| 3062 | struct device *dev = acpi_desc->dev; |
| 3063 | struct kernfs_node *nfit; |
| 3064 | struct device *bus_dev; |
| 3065 | |
| 3066 | if (!ars_supported(acpi_desc->nvdimm_bus)) |
| 3067 | return 0; |
| 3068 | |
| 3069 | bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); |
| 3070 | nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); |
| 3071 | if (!nfit) { |
| 3072 | dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); |
| 3073 | return -ENODEV; |
| 3074 | } |
| 3075 | acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); |
| 3076 | sysfs_put(nfit); |
| 3077 | if (!acpi_desc->scrub_count_state) { |
| 3078 | dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); |
| 3079 | return -ENODEV; |
| 3080 | } |
| 3081 | |
| 3082 | return 0; |
| 3083 | } |
| 3084 | |
| 3085 | static void acpi_nfit_unregister(void *data) |
| 3086 | { |
| 3087 | struct acpi_nfit_desc *acpi_desc = data; |
| 3088 | |
| 3089 | nvdimm_bus_unregister(acpi_desc->nvdimm_bus); |
| 3090 | } |
| 3091 | |
| 3092 | int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) |
| 3093 | { |
| 3094 | struct device *dev = acpi_desc->dev; |
| 3095 | struct nfit_table_prev prev; |
| 3096 | const void *end; |
| 3097 | int rc; |
| 3098 | |
| 3099 | if (!acpi_desc->nvdimm_bus) { |
| 3100 | acpi_nfit_init_dsms(acpi_desc); |
| 3101 | |
| 3102 | acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, |
| 3103 | &acpi_desc->nd_desc); |
| 3104 | if (!acpi_desc->nvdimm_bus) |
| 3105 | return -ENOMEM; |
| 3106 | |
| 3107 | rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, |
| 3108 | acpi_desc); |
| 3109 | if (rc) |
| 3110 | return rc; |
| 3111 | |
| 3112 | rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); |
| 3113 | if (rc) |
| 3114 | return rc; |
| 3115 | |
| 3116 | /* register this acpi_desc for mce notifications */ |
| 3117 | mutex_lock(&acpi_desc_lock); |
| 3118 | list_add_tail(&acpi_desc->list, &acpi_descs); |
| 3119 | mutex_unlock(&acpi_desc_lock); |
| 3120 | } |
| 3121 | |
| 3122 | mutex_lock(&acpi_desc->init_mutex); |
| 3123 | |
| 3124 | INIT_LIST_HEAD(&prev.spas); |
| 3125 | INIT_LIST_HEAD(&prev.memdevs); |
| 3126 | INIT_LIST_HEAD(&prev.dcrs); |
| 3127 | INIT_LIST_HEAD(&prev.bdws); |
| 3128 | INIT_LIST_HEAD(&prev.idts); |
| 3129 | INIT_LIST_HEAD(&prev.flushes); |
| 3130 | |
| 3131 | list_cut_position(&prev.spas, &acpi_desc->spas, |
| 3132 | acpi_desc->spas.prev); |
| 3133 | list_cut_position(&prev.memdevs, &acpi_desc->memdevs, |
| 3134 | acpi_desc->memdevs.prev); |
| 3135 | list_cut_position(&prev.dcrs, &acpi_desc->dcrs, |
| 3136 | acpi_desc->dcrs.prev); |
| 3137 | list_cut_position(&prev.bdws, &acpi_desc->bdws, |
| 3138 | acpi_desc->bdws.prev); |
| 3139 | list_cut_position(&prev.idts, &acpi_desc->idts, |
| 3140 | acpi_desc->idts.prev); |
| 3141 | list_cut_position(&prev.flushes, &acpi_desc->flushes, |
| 3142 | acpi_desc->flushes.prev); |
| 3143 | |
| 3144 | end = data + sz; |
| 3145 | while (!IS_ERR_OR_NULL(data)) |
| 3146 | data = add_table(acpi_desc, &prev, data, end); |
| 3147 | |
| 3148 | if (IS_ERR(data)) { |
| 3149 | dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, |
| 3150 | PTR_ERR(data)); |
| 3151 | rc = PTR_ERR(data); |
| 3152 | goto out_unlock; |
| 3153 | } |
| 3154 | |
| 3155 | rc = acpi_nfit_check_deletions(acpi_desc, &prev); |
| 3156 | if (rc) |
| 3157 | goto out_unlock; |
| 3158 | |
| 3159 | rc = nfit_mem_init(acpi_desc); |
| 3160 | if (rc) |
| 3161 | goto out_unlock; |
| 3162 | |
| 3163 | rc = acpi_nfit_register_dimms(acpi_desc); |
| 3164 | if (rc) |
| 3165 | goto out_unlock; |
| 3166 | |
| 3167 | rc = acpi_nfit_register_regions(acpi_desc); |
| 3168 | |
| 3169 | out_unlock: |
| 3170 | mutex_unlock(&acpi_desc->init_mutex); |
| 3171 | return rc; |
| 3172 | } |
| 3173 | EXPORT_SYMBOL_GPL(acpi_nfit_init); |
| 3174 | |
| 3175 | struct acpi_nfit_flush_work { |
| 3176 | struct work_struct work; |
| 3177 | struct completion cmp; |
| 3178 | }; |
| 3179 | |
| 3180 | static void flush_probe(struct work_struct *work) |
| 3181 | { |
| 3182 | struct acpi_nfit_flush_work *flush; |
| 3183 | |
| 3184 | flush = container_of(work, typeof(*flush), work); |
| 3185 | complete(&flush->cmp); |
| 3186 | } |
| 3187 | |
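/*
 * The nvdimm core calls flush_probe() before probing regions. Since
 * nfit_wq is single-threaded, waiting for a no-op work item queued
 * behind the scrub work guarantees that all previously queued
 * initialization work has finished.
 */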
| 3188 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) |
| 3189 | { |
| 3190 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); |
| 3191 | struct device *dev = acpi_desc->dev; |
| 3192 | struct acpi_nfit_flush_work flush; |
| 3193 | int rc; |
| 3194 | |
| 3195 | /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ |
| 3196 | device_lock(dev); |
| 3197 | device_unlock(dev); |
| 3198 | |
| 3199 | /* bounce the init_mutex to make init_complete valid */ |
| 3200 | mutex_lock(&acpi_desc->init_mutex); |
| 3201 | if (acpi_desc->cancel || acpi_desc->init_complete) { |
| 3202 | mutex_unlock(&acpi_desc->init_mutex); |
| 3203 | return 0; |
| 3204 | } |
| 3205 | |
/*
 * Scrub work could take tens of seconds; userspace may give up, so we
 * need to be interruptible while waiting.
 */
| 3210 | INIT_WORK_ONSTACK(&flush.work, flush_probe); |
| 3211 | init_completion(&flush.cmp); |
| 3212 | queue_work(nfit_wq, &flush.work); |
| 3213 | mutex_unlock(&acpi_desc->init_mutex); |
| 3214 | |
| 3215 | rc = wait_for_completion_interruptible(&flush.cmp); |
| 3216 | cancel_work_sync(&flush.work); |
| 3217 | return rc; |
| 3218 | } |
| 3219 | |
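/*
 * Gate bus-level ND_CMD_ARS_START requests from userspace while the
 * kernel's own scrub work is in flight; per-dimm commands are always
 * clear to send.
 */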
| 3220 | static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, |
| 3221 | struct nvdimm *nvdimm, unsigned int cmd) |
| 3222 | { |
| 3223 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); |
| 3224 | |
| 3225 | if (nvdimm) |
| 3226 | return 0; |
| 3227 | if (cmd != ND_CMD_ARS_START) |
| 3228 | return 0; |
| 3229 | |
/*
 * The kernel and userspace may race to initiate a scrub, but
 * the scrub thread is prepared to lose that initial race. It
 * just needs a guarantee that any ARS it initiates is not
 * interrupted by an intervening start request from userspace.
 */
| 3236 | if (work_busy(&acpi_desc->work)) |
| 3237 | return -EBUSY; |
| 3238 | |
| 3239 | return 0; |
| 3240 | } |
| 3241 | |
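/*
 * Mark every persistent-memory range for a fresh address range scrub
 * and kick the scrub work. Returns -EBUSY if a scrub is already in
 * flight, and quietly does nothing once teardown has started.
 */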
| 3242 | int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags) |
| 3243 | { |
| 3244 | struct device *dev = acpi_desc->dev; |
| 3245 | struct nfit_spa *nfit_spa; |
| 3246 | |
| 3247 | if (work_busy(&acpi_desc->work)) |
| 3248 | return -EBUSY; |
| 3249 | |
| 3250 | mutex_lock(&acpi_desc->init_mutex); |
| 3251 | if (acpi_desc->cancel) { |
| 3252 | mutex_unlock(&acpi_desc->init_mutex); |
| 3253 | return 0; |
| 3254 | } |
| 3255 | |
| 3256 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
| 3257 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 3258 | |
| 3259 | if (nfit_spa_type(spa) != NFIT_SPA_PM) |
| 3260 | continue; |
| 3261 | |
| 3262 | nfit_spa->ars_required = 1; |
| 3263 | } |
| 3264 | acpi_desc->ars_start_flags = flags; |
| 3265 | queue_work(nfit_wq, &acpi_desc->work); |
| 3266 | dev_dbg(dev, "%s: ars_scan triggered\n", __func__); |
| 3267 | mutex_unlock(&acpi_desc->init_mutex); |
| 3268 | |
| 3269 | return 0; |
| 3270 | } |
| 3271 | |
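/*
 * One-time initialization of an acpi_nfit_desc: wire up the nvdimm
 * bus descriptor callbacks and the list heads that anchor parsed NFIT
 * tables.
 */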
| 3272 | void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) |
| 3273 | { |
| 3274 | struct nvdimm_bus_descriptor *nd_desc; |
| 3275 | |
| 3276 | dev_set_drvdata(dev, acpi_desc); |
| 3277 | acpi_desc->dev = dev; |
| 3278 | acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; |
| 3279 | nd_desc = &acpi_desc->nd_desc; |
| 3280 | nd_desc->provider_name = "ACPI.NFIT"; |
| 3281 | nd_desc->module = THIS_MODULE; |
| 3282 | nd_desc->ndctl = acpi_nfit_ctl; |
| 3283 | nd_desc->flush_probe = acpi_nfit_flush_probe; |
| 3284 | nd_desc->clear_to_send = acpi_nfit_clear_to_send; |
| 3285 | nd_desc->attr_groups = acpi_nfit_attribute_groups; |
| 3286 | |
| 3287 | INIT_LIST_HEAD(&acpi_desc->spas); |
| 3288 | INIT_LIST_HEAD(&acpi_desc->dcrs); |
| 3289 | INIT_LIST_HEAD(&acpi_desc->bdws); |
| 3290 | INIT_LIST_HEAD(&acpi_desc->idts); |
| 3291 | INIT_LIST_HEAD(&acpi_desc->flushes); |
| 3292 | INIT_LIST_HEAD(&acpi_desc->memdevs); |
| 3293 | INIT_LIST_HEAD(&acpi_desc->dimms); |
| 3294 | INIT_LIST_HEAD(&acpi_desc->list); |
| 3295 | mutex_init(&acpi_desc->init_mutex); |
| 3296 | INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); |
| 3297 | } |
| 3298 | EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); |
| 3299 | |
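/* adapt acpi_put_table() to the devm_add_action() callback signature */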
| 3300 | static void acpi_nfit_put_table(void *table) |
| 3301 | { |
| 3302 | acpi_put_table(table); |
| 3303 | } |
| 3304 | |
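/*
 * Teardown proceeds in stages: drop off the acpi_descs list so MCE
 * handling stops referencing this descriptor, set ->cancel so the
 * scrub thread stops rescheduling itself, then drain nfit_wq.
 */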
| 3305 | void acpi_nfit_shutdown(void *data) |
| 3306 | { |
| 3307 | struct acpi_nfit_desc *acpi_desc = data; |
| 3308 | struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); |
| 3309 | |
/*
 * Remove the descriptor under acpi_desc_lock so that
 * nfit_handle_mce() does not race teardown.
 */
| 3314 | mutex_lock(&acpi_desc_lock); |
| 3315 | list_del(&acpi_desc->list); |
| 3316 | mutex_unlock(&acpi_desc_lock); |
| 3317 | |
| 3318 | mutex_lock(&acpi_desc->init_mutex); |
| 3319 | acpi_desc->cancel = 1; |
| 3320 | mutex_unlock(&acpi_desc->init_mutex); |
| 3321 | |
| 3322 | /* |
| 3323 | * Bounce the nvdimm bus lock to make sure any in-flight |
| 3324 | * acpi_nfit_ars_rescan() submissions have had a chance to |
| 3325 | * either submit or see ->cancel set. |
| 3326 | */ |
| 3327 | device_lock(bus_dev); |
| 3328 | device_unlock(bus_dev); |
| 3329 | |
| 3330 | flush_workqueue(nfit_wq); |
| 3331 | } |
| 3332 | EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); |
| 3333 | |
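/*
 * Everything acpi_nfit_add() allocates or registers is devm-managed;
 * devm runs release actions in reverse order on driver detach, so
 * acpi_nfit_shutdown() fires before acpi_nfit_unregister() and
 * acpi_nfit_put_table().
 */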
| 3334 | static int acpi_nfit_add(struct acpi_device *adev) |
| 3335 | { |
| 3336 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 3337 | struct acpi_nfit_desc *acpi_desc; |
| 3338 | struct device *dev = &adev->dev; |
| 3339 | struct acpi_table_header *tbl; |
| 3340 | acpi_status status = AE_OK; |
| 3341 | acpi_size sz; |
| 3342 | int rc = 0; |
| 3343 | |
| 3344 | status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); |
| 3345 | if (ACPI_FAILURE(status)) { |
/* This is ok; an NVDIMM could still be hotplugged later */
| 3347 | dev_dbg(dev, "failed to find NFIT at startup\n"); |
| 3348 | return 0; |
| 3349 | } |
| 3350 | |
| 3351 | rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); |
| 3352 | if (rc) |
| 3353 | return rc; |
| 3354 | sz = tbl->length; |
| 3355 | |
| 3356 | acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); |
| 3357 | if (!acpi_desc) |
| 3358 | return -ENOMEM; |
| 3359 | acpi_nfit_desc_init(acpi_desc, &adev->dev); |
| 3360 | |
/* Save the ACPI table header for exporting the revision via sysfs */
| 3362 | acpi_desc->acpi_header = *tbl; |
| 3363 | |
| 3364 | /* Evaluate _FIT and override with that if present */ |
| 3365 | status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); |
| 3366 | if (ACPI_SUCCESS(status) && buf.length > 0) { |
| 3367 | union acpi_object *obj = buf.pointer; |
| 3368 | |
| 3369 | if (obj->type == ACPI_TYPE_BUFFER) |
| 3370 | rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, |
| 3371 | obj->buffer.length); |
| 3372 | else |
| 3373 | dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", |
| 3374 | __func__, (int) obj->type); |
| 3375 | kfree(buf.pointer); |
| 3376 | } else |
| 3377 | /* skip over the lead-in header table */ |
| 3378 | rc = acpi_nfit_init(acpi_desc, (void *) tbl |
| 3379 | + sizeof(struct acpi_table_nfit), |
| 3380 | sz - sizeof(struct acpi_table_nfit)); |
| 3381 | |
| 3382 | if (rc) |
| 3383 | return rc; |
| 3384 | return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); |
| 3385 | } |
| 3386 | |
| 3387 | static int acpi_nfit_remove(struct acpi_device *adev) |
| 3388 | { |
| 3389 | /* see acpi_nfit_unregister */ |
| 3390 | return 0; |
| 3391 | } |
| 3392 | |
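/*
 * Handle an NFIT update event by re-evaluating _FIT and merging the
 * result. _FIT returns the full current table set; acpi_nfit_init()
 * tolerates entries it has already seen, so the whole buffer can be
 * passed in each time.
 */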
| 3393 | static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) |
| 3394 | { |
| 3395 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); |
| 3396 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 3397 | union acpi_object *obj; |
| 3398 | acpi_status status; |
| 3399 | int ret; |
| 3400 | |
| 3401 | if (!dev->driver) { |
| 3402 | /* dev->driver may be null if we're being removed */ |
| 3403 | dev_dbg(dev, "%s: no driver found for dev\n", __func__); |
| 3404 | return; |
| 3405 | } |
| 3406 | |
| 3407 | if (!acpi_desc) { |
| 3408 | acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); |
| 3409 | if (!acpi_desc) |
| 3410 | return; |
| 3411 | acpi_nfit_desc_init(acpi_desc, dev); |
| 3412 | } else { |
| 3413 | /* |
| 3414 | * Finish previous registration before considering new |
| 3415 | * regions. |
| 3416 | */ |
| 3417 | flush_workqueue(nfit_wq); |
| 3418 | } |
| 3419 | |
| 3420 | /* Evaluate _FIT */ |
| 3421 | status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); |
| 3422 | if (ACPI_FAILURE(status)) { |
| 3423 | dev_err(dev, "failed to evaluate _FIT\n"); |
| 3424 | return; |
| 3425 | } |
| 3426 | |
| 3427 | obj = buf.pointer; |
| 3428 | if (obj->type == ACPI_TYPE_BUFFER) { |
| 3429 | ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, |
| 3430 | obj->buffer.length); |
| 3431 | if (ret) |
| 3432 | dev_err(dev, "failed to merge updated NFIT\n"); |
| 3433 | } else |
| 3434 | dev_err(dev, "Invalid _FIT\n"); |
| 3435 | kfree(buf.pointer); |
| 3436 | } |
| 3437 | |
| 3438 | static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) |
| 3439 | { |
| 3440 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); |
| 3441 | u8 flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? |
| 3442 | 0 : ND_ARS_RETURN_PREV_DATA; |
| 3443 | |
| 3444 | acpi_nfit_ars_rescan(acpi_desc, flags); |
| 3445 | } |
| 3446 | |
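/*
 * Dispatch NFIT device notifications: _FIT updates and uncorrectable
 * memory error events; all other event codes are ignored.
 */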
| 3447 | void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) |
| 3448 | { |
| 3449 | dev_dbg(dev, "%s: event: 0x%x\n", __func__, event); |
| 3450 | |
| 3451 | switch (event) { |
| 3452 | case NFIT_NOTIFY_UPDATE: |
| 3453 | return acpi_nfit_update_notify(dev, handle); |
| 3454 | case NFIT_NOTIFY_UC_MEMORY_ERROR: |
| 3455 | return acpi_nfit_uc_error_notify(dev, handle); |
| 3456 | default: |
| 3457 | return; |
| 3458 | } |
| 3459 | } |
| 3460 | EXPORT_SYMBOL_GPL(__acpi_nfit_notify); |
| 3461 | |
| 3462 | static void acpi_nfit_notify(struct acpi_device *adev, u32 event) |
| 3463 | { |
| 3464 | device_lock(&adev->dev); |
| 3465 | __acpi_nfit_notify(&adev->dev, adev->handle, event); |
| 3466 | device_unlock(&adev->dev); |
| 3467 | } |
| 3468 | |
| 3469 | static const struct acpi_device_id acpi_nfit_ids[] = { |
| 3470 | { "ACPI0012", 0 }, |
| 3471 | { "", 0 }, |
| 3472 | }; |
| 3473 | MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); |
| 3474 | |
| 3475 | static struct acpi_driver acpi_nfit_driver = { |
| 3476 | .name = KBUILD_MODNAME, |
| 3477 | .ids = acpi_nfit_ids, |
| 3478 | .ops = { |
| 3479 | .add = acpi_nfit_add, |
| 3480 | .remove = acpi_nfit_remove, |
| 3481 | .notify = acpi_nfit_notify, |
| 3482 | }, |
| 3483 | }; |
| 3484 | |
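/*
 * The BUILD_BUG_ON()s below pin the ACPICA structure definitions to
 * the table sizes mandated by the ACPI NFIT specification; a mismatch
 * is a compile-time header problem, not a runtime condition.
 */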
| 3485 | static __init int nfit_init(void) |
| 3486 | { |
| 3487 | int ret; |
| 3488 | |
| 3489 | BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); |
| 3490 | BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); |
| 3491 | BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); |
| 3492 | BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); |
| 3493 | BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); |
| 3494 | BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); |
| 3495 | BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); |
| 3496 | BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); |
| 3497 | |
| 3498 | guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); |
| 3499 | guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); |
| 3500 | guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); |
| 3501 | guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); |
| 3502 | guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); |
| 3503 | guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); |
| 3504 | guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); |
| 3505 | guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); |
| 3506 | guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); |
| 3507 | guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); |
| 3508 | guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); |
| 3509 | guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); |
| 3510 | guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); |
| 3511 | |
| 3512 | nfit_wq = create_singlethread_workqueue("nfit"); |
| 3513 | if (!nfit_wq) |
| 3514 | return -ENOMEM; |
| 3515 | |
| 3516 | nfit_mce_register(); |
| 3517 | ret = acpi_bus_register_driver(&acpi_nfit_driver); |
| 3518 | if (ret) { |
| 3519 | nfit_mce_unregister(); |
| 3520 | destroy_workqueue(nfit_wq); |
| 3521 | } |
| 3522 | |
return ret;
}
| 3526 | |
| 3527 | static __exit void nfit_exit(void) |
| 3528 | { |
| 3529 | nfit_mce_unregister(); |
| 3530 | acpi_bus_unregister_driver(&acpi_nfit_driver); |
| 3531 | destroy_workqueue(nfit_wq); |
| 3532 | WARN_ON(!list_empty(&acpi_descs)); |
| 3533 | } |
| 3534 | |
| 3535 | module_init(nfit_init); |
| 3536 | module_exit(nfit_exit); |
| 3537 | MODULE_LICENSE("GPL v2"); |
| 3538 | MODULE_AUTHOR("Intel Corporation"); |