/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Validate that this dimm supports the get_config_data command: an
 * aliased dimm without label access is an error (-ENXIO), while a
 * non-aliased dimm simply lacks the capability (-ENOTTY).
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

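/*
 * Read the dimm's label storage area into the ndd->data cache. The
 * transfer is chunked at nsarea.max_xfer since the bus provider may
 * cap the payload of a single ND_CMD_GET_CONFIG_DATA call.
 */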
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
			|| ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
		dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, ndd->nsarea.config_size, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd), &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
	kvfree(cmd);

	return rc;
}

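/*
 * Write back a range of the cached label data. Like the read path the
 * transfer is chunked at nsarea.max_xfer, and each command buffer ends
 * with a u32 status word that the bus provider fills in.
 */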
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (!ndd->data)
		return -ENXIO;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

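/*
 * Final kref release for a dimm's driver-data: drop all dpa
 * reservations, free the cached label data, and release the device
 * reference held by the drvdata.
 */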
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
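	/*
	 * Report one fewer than the raw free-slot count; the subtraction
	 * check guards against u32 underflow when no slots remain.
	 */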
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

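/*
 * Allocate and register an "nmemN" device below the given bus. The
 * caller passes in the provider-specific command mask, flags, and any
 * write-pending-queue flush resources discovered for this dimm.
 */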
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

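/*
 * Bus-walk callback: for each region that maps the dimm in question,
 * push the candidate BLK start address past the highest aliased PMEM
 * allocation and shrink the probed free-space range accordingly.
 */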
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: BLK region whose single dimm mapping is being accounted
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *		contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
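	/*
	 * Temporarily claim every free pmem-capable range on this dimm as
	 * a "pmem-reserve" resource, record the largest one, then drop the
	 * reservations again before returning.
	 */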
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		if (resource_size(res) > max)
			max = resource_size(res);
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set. Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}