// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

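/*
 * Usage note (editorial, not from the original source): this file is built
 * into the libnvdimm module, so BLK / local alias support can be force
 * disabled either at boot with "libnvdimm.noblk=1" on the kernel command
 * line, or at load time with "modprobe libnvdimm noblk=1".
 */
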
/*
 * Retrieve bus and dimm handle and return whether the bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
			len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

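/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * the entire label config area could combine the helpers above as below.
 * The example_read_config() name is hypothetical; the split into
 * ndd->nsarea.max_xfer sized transfers happens inside
 * nvdimm_get_config_data() itself.
 *
 *	static int example_read_config(struct nvdimm_drvdata *ndd, void *buf)
 *	{
 *		int rc = nvdimm_init_nsarea(ndd);
 *
 *		if (rc)
 *			return rc;
 *		return nvdimm_get_config_data(ndd, buf, 0,
 *				ndd->nsarea.config_size);
 *	}
 */
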
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

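/*
 * Editorial note: layout of the ND_CMD_SET_CONFIG_DATA buffer built above,
 * per the kvzalloc() sizing and the memcpy() in the loop:
 *
 *	[ nd_cmd_set_config_hdr ][ in_buf: in_length bytes ][ u32 status ]
 *
 * The trailing u32 is the status word the bus provider writes back, which
 * is why both the allocation and cmd_size include an extra sizeof(u32).
 */
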
void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

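/*
 * Editorial note: get_ndd()/put_ndd() implement the nvdimm_drvdata
 * lifetime. The final put_ndd() invokes nvdimm_drvdata_release(), which
 * frees all dpa resources under the bus lock, releases the label data
 * buffer, and drops a device reference.
 */
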
const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	/* one slot is held back; the unsigned subtraction underflows at 0 */
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

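/*
 * Editorial note: the group above, combined with nvdimm_visible(), yields
 * a per-dimm sysfs directory (nmemN, see __nvdimm_create() below) holding
 * state, flags, commands, available_slots, and, when security operations
 * are provided, security and frozen. "security" degrades to read-only
 * (0444) when no state-mutating ops are registered.
 */
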
static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

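/*
 * Illustrative usage (editorial, assuming a bus provider with firmware
 * activation support and a dimm enumerated as nmem0):
 *
 *	echo arm > /sys/bus/nd/devices/nmem0/firmware/activate
 *	cat /sys/bus/nd/devices/nmem0/firmware/result
 *
 * Reads of "activate" report idle/busy/armed; writes accept "arm" or
 * "disarm". "result" reports none/success/fail/not_staged/need_reset.
 */
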
static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility: fetch the user security state and the
	 * extended (master) state.
	 */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

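/*
 * Illustrative sketch (hypothetical caller, not from this file): a bus
 * provider registering a labeled dimm with no flush hints, security ops,
 * or firmware ops might call:
 *
 *	nvdimm = __nvdimm_create(nvdimm_bus, provider_data, groups,
 *			1UL << NDD_LABELING, cmd_mask, 0, NULL,
 *			dimm_id, NULL, NULL);
 */
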
static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

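/*
 * Editorial example: for a region interleaved across 4 dimms with a region
 * alignment of 16M, dpa_align() returns 16M / 4 = 4M, i.e. the per-dimm
 * granularity that keeps allocations aligned across the interleave set.
 */
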
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	unsigned long align;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	align = dpa_align(nd_region);
	if (!align)
		return 0;

	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strncmp(res->name, "pmem", 4) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if ((start >= blk_start && start < map_end)
				|| (end >= blk_start && end <= map_end)) {
			new = max(blk_start, min(map_end, end) + 1);
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	align = dpa_align(nd_region);
	if (!align)
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end, size;

		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		size = end - start + 1;
		if (size >= info.available)
			return 0;
		info.available -= size;
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, start));
			else if (end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return ALIGN_DOWN(available - busy, align);
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

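/*
 * Editorial example of the accounting above: with a 4G mapping starting at
 * map_start and a BLK allocation occupying its last 1G, blk_start lands at
 * map_start + 3G, so *overlap becomes 1G and at most
 * ALIGN_DOWN(3G - busy, align) remains available for PMEM on this dimm.
 */
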
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}