/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

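/* ida supplying the unique id portion of "nmem%d" dimm device names */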
static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

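/*
 * A dimm's label area is only reachable when driver-data is attached
 * and the bus supports the config-data commands.
 */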
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

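/*
 * Cache the dimm's label configuration area in ndd->data by reading it
 * back in max_xfer-sized ND_CMD_GET_CONFIG_DATA chunks.
 */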
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
			|| ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
		dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd), &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
	kfree(cmd);

	return rc;
}

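/*
 * Write @len bytes at @offset into the label configuration area,
 * splitting the transfer into max_xfer-sized ND_CMD_SET_CONFIG_DATA
 * commands.
 */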
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (!ndd->data)
		return -ENXIO;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, PAGE_SIZE, len);
	max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kfree(cmd);

	return rc;
}

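/* helpers for bus providers to record dimm state discovered at scan time */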
void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

62232e45 DW |
319 | static ssize_t commands_show(struct device *dev, |
320 | struct device_attribute *attr, char *buf) | |
321 | { | |
322 | struct nvdimm *nvdimm = to_nvdimm(dev); | |
323 | int cmd, len = 0; | |
324 | ||
e3654eca | 325 | if (!nvdimm->cmd_mask) |
62232e45 DW |
326 | return sprintf(buf, "\n"); |
327 | ||
e3654eca | 328 | for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG) |
62232e45 DW |
329 | len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd)); |
330 | len += sprintf(buf + len, "\n"); | |
331 | return len; | |
332 | } | |
333 | static DEVICE_ATTR_RO(commands); | |
334 | ||
static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
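	/*
	 * Report one less than the raw free count, presumably so a slot
	 * stays in reserve for updating an existing label (the new label
	 * is written before the old one is retired); the check below
	 * guards the u32 underflow when no slots are free at all.
	 */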
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

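/*
 * device_for_each_child() callback: for each region that maps the dimm
 * referenced by @data, advance blk_start past aliased PMEM allocations
 * and update the probed free-space range / available count accordingly.
 */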
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		if (resource_size(res) > max)
			max = resource_size(res);
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

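/*
 * Verify that all the dimms a bus provider expected to register have
 * appeared as child devices of @nvdimm_bus.
 */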
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}