// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

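/*
 * Note: when the "noblk" module parameter is set, __nvdimm_create() below
 * ORs NDD_NOBLK into the flags of every DIMM it registers, disabling
 * BLK-aperture / local-alias support bus-wide.
 */
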
/*
 * Retrieve bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

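/*
 * Convention for the ->ndctl() calls throughout this file: a negative
 * return value (rc) indicates a transport/bus-level failure, while cmd_rc
 * carries the translated status of the command itself; both are checked.
 */
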
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

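/*
 * The read above is chunked: each pass transfers at most
 * ndd->nsarea.max_xfer bytes of label-area data. As an illustrative
 * example (numbers hypothetical), reading a 128KB config area with a
 * max_xfer of 4KB issues 32 ND_CMD_GET_CONFIG_DATA calls, advancing
 * in_offset/buf_offset by in_length on each iteration.
 */
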
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

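/*
 * Per-iteration layout of the SET_CONFIG_DATA buffer allocated above:
 *
 *   +-------------------------+--------------------+-------------+
 *   | nd_cmd_set_config_hdr   | in_buf[in_length]  | u32 status  |
 *   +-------------------------+--------------------+-------------+
 *
 * hence cmd_size = sizeof(*cmd) + in_length + sizeof(u32), with the
 * firmware status reported in the trailing 4 bytes.
 */
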
void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

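/*
 * Note on the "nfree - 1 > nfree" test above: nfree is unsigned, so the
 * comparison is only true when nfree is 0 and the subtraction wraps; it is
 * an underflow guard that decrements the free-slot count without going
 * negative.
 */
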
__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

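/*
 * Locking order used above: nd_device_lock() is taken before
 * nvdimm_bus_lock(), and wait_nvdimm_bus_probe_idle() ensures no async
 * probe is touching the DIMM while the security operation runs; unlock
 * is the mirror image.
 */
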
static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility; retrieve both the user security state and
	 * the extended (master) state here.
	 */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

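/*
 * Illustrative (hypothetical) caller: a bus provider such as an ACPI/NFIT
 * driver would register each DIMM roughly as
 *
 *	nvdimm = __nvdimm_create(nvdimm_bus, provider_data, dimm_groups,
 *			flags, cmd_mask, num_flush, flush_wpq, dimm_id,
 *			&provider_security_ops);
 *
 * where dimm_groups, flush_wpq and provider_security_ops are provider
 * specific names used only for this sketch; passing a NULL sec_ops is
 * expected to leave sec.flags clear so nvdimm_visible() hides the
 * security attributes.
 */
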
static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to the free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

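/*
 * Summary of the retry loop above: blk_start is repeatedly advanced past
 * any PMEM allocation that overlaps [blk_start, map_end], so on exit it
 * marks the lowest DPA at which a new BLK allocation may begin in this
 * aliased mapping.
 */
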
/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: region containing the BLK dimm mapping (dpa-resource-root + labels)
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		if (resource_size(res) > max)
			max = resource_size(res);
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

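/*
 * Worked example (illustrative numbers): for a 32M mapping at dpa 0 with
 * an 8M "pmem" allocation at dpa 0, an 8M "blk" allocation at dpa 24M,
 * and *overlap passed in as 0, the scan above leaves blk_start = 24M, so
 * *overlap becomes 8M, available = 24M, busy = 8M, and the routine
 * returns 16M of PMEM-allocatable dpa.
 */
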
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}