Commit | Line | Data |
---|---|---|
e6dfb2de DW |
1 | /* |
2 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of version 2 of the GNU General Public License as | |
6 | * published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | */ | |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
4d88a97a | 14 | #include <linux/vmalloc.h> |
e6dfb2de | 15 | #include <linux/device.h> |
62232e45 | 16 | #include <linux/ndctl.h> |
e6dfb2de DW |
17 | #include <linux/slab.h> |
18 | #include <linux/io.h> | |
19 | #include <linux/fs.h> | |
20 | #include <linux/mm.h> | |
21 | #include "nd-core.h" | |
4d88a97a | 22 | #include "nd.h" |
e6dfb2de DW |
23 | |
24 | static DEFINE_IDA(dimm_ida); | |
25 | ||
4d88a97a DW |
26 | /* |
27 | * Retrieve bus and dimm handle and return if this bus supports | |
28 | * get_config_data commands | |
29 | */ | |
30 | static int __validate_dimm(struct nvdimm_drvdata *ndd) | |
31 | { | |
32 | struct nvdimm *nvdimm; | |
33 | ||
34 | if (!ndd) | |
35 | return -EINVAL; | |
36 | ||
37 | nvdimm = to_nvdimm(ndd->dev); | |
38 | ||
39 | if (!nvdimm->dsm_mask) | |
40 | return -ENXIO; | |
41 | if (!test_bit(ND_CMD_GET_CONFIG_DATA, nvdimm->dsm_mask)) | |
42 | return -ENXIO; | |
43 | ||
44 | return 0; | |
45 | } | |
46 | ||
47 | static int validate_dimm(struct nvdimm_drvdata *ndd) | |
48 | { | |
49 | int rc = __validate_dimm(ndd); | |
50 | ||
51 | if (rc && ndd) | |
52 | dev_dbg(ndd->dev, "%pf: %s error: %d\n", | |
53 | __builtin_return_address(0), __func__, rc); | |
54 | return rc; | |
55 | } | |
56 | ||
57 | /** | |
58 | * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area | |
59 | * @nvdimm: dimm to initialize | |
60 | */ | |
61 | int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) | |
62 | { | |
63 | struct nd_cmd_get_config_size *cmd = &ndd->nsarea; | |
64 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); | |
65 | struct nvdimm_bus_descriptor *nd_desc; | |
66 | int rc = validate_dimm(ndd); | |
67 | ||
68 | if (rc) | |
69 | return rc; | |
70 | ||
71 | if (cmd->config_size) | |
72 | return 0; /* already valid */ | |
73 | ||
74 | memset(cmd, 0, sizeof(*cmd)); | |
75 | nd_desc = nvdimm_bus->nd_desc; | |
76 | return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), | |
77 | ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd)); | |
78 | } | |
79 | ||
80 | int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) | |
81 | { | |
82 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); | |
83 | struct nd_cmd_get_config_data_hdr *cmd; | |
84 | struct nvdimm_bus_descriptor *nd_desc; | |
85 | int rc = validate_dimm(ndd); | |
86 | u32 max_cmd_size, config_size; | |
87 | size_t offset; | |
88 | ||
89 | if (rc) | |
90 | return rc; | |
91 | ||
92 | if (ndd->data) | |
93 | return 0; | |
94 | ||
4a826c83 DW |
95 | if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 |
96 | || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) { | |
97 | dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n", | |
98 | ndd->nsarea.max_xfer, ndd->nsarea.config_size); | |
4d88a97a | 99 | return -ENXIO; |
4a826c83 | 100 | } |
4d88a97a DW |
101 | |
102 | ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL); | |
103 | if (!ndd->data) | |
104 | ndd->data = vmalloc(ndd->nsarea.config_size); | |
105 | ||
106 | if (!ndd->data) | |
107 | return -ENOMEM; | |
108 | ||
109 | max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer); | |
110 | cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL); | |
111 | if (!cmd) | |
112 | return -ENOMEM; | |
113 | ||
114 | nd_desc = nvdimm_bus->nd_desc; | |
115 | for (config_size = ndd->nsarea.config_size, offset = 0; | |
116 | config_size; config_size -= cmd->in_length, | |
117 | offset += cmd->in_length) { | |
118 | cmd->in_length = min(config_size, max_cmd_size); | |
119 | cmd->in_offset = offset; | |
120 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), | |
121 | ND_CMD_GET_CONFIG_DATA, cmd, | |
122 | cmd->in_length + sizeof(*cmd)); | |
123 | if (rc || cmd->status) { | |
124 | rc = -ENXIO; | |
125 | break; | |
126 | } | |
127 | memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); | |
128 | } | |
129 | dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc); | |
130 | kfree(cmd); | |
131 | ||
132 | return rc; | |
133 | } | |
134 | ||
e6dfb2de DW |
135 | static void nvdimm_release(struct device *dev) |
136 | { | |
137 | struct nvdimm *nvdimm = to_nvdimm(dev); | |
138 | ||
139 | ida_simple_remove(&dimm_ida, nvdimm->id); | |
140 | kfree(nvdimm); | |
141 | } | |
142 | ||
/* Shared device_type for all dimm devices; also the tag is_nvdimm() checks. */
static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

/* True iff @dev was created by nvdimm_create() (checked via device_type). */
bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}
152 | ||
153 | struct nvdimm *to_nvdimm(struct device *dev) | |
154 | { | |
155 | struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev); | |
156 | ||
157 | WARN_ON(!is_nvdimm(dev)); | |
158 | return nvdimm; | |
159 | } | |
160 | EXPORT_SYMBOL_GPL(to_nvdimm); | |
161 | ||
/* Return the sysfs name ("nmemN") assigned at nvdimm_create() time. */
const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);
167 | ||
168 | void *nvdimm_provider_data(struct nvdimm *nvdimm) | |
169 | { | |
62232e45 DW |
170 | if (nvdimm) |
171 | return nvdimm->provider_data; | |
172 | return NULL; | |
e6dfb2de DW |
173 | } |
174 | EXPORT_SYMBOL_GPL(nvdimm_provider_data); | |
175 | ||
62232e45 DW |
176 | static ssize_t commands_show(struct device *dev, |
177 | struct device_attribute *attr, char *buf) | |
178 | { | |
179 | struct nvdimm *nvdimm = to_nvdimm(dev); | |
180 | int cmd, len = 0; | |
181 | ||
182 | if (!nvdimm->dsm_mask) | |
183 | return sprintf(buf, "\n"); | |
184 | ||
185 | for_each_set_bit(cmd, nvdimm->dsm_mask, BITS_PER_LONG) | |
186 | len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd)); | |
187 | len += sprintf(buf + len, "\n"); | |
188 | return len; | |
189 | } | |
190 | static DEVICE_ATTR_RO(commands); | |
191 | ||
eaf96153 DW |
/* sysfs: report "active" while any namespace operation holds the dimm busy */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	/* lock/unlock pair acts as a barrier against in-flight transitions */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
207 | ||
62232e45 | 208 | static struct attribute *nvdimm_attributes[] = { |
eaf96153 | 209 | &dev_attr_state.attr, |
62232e45 DW |
210 | &dev_attr_commands.attr, |
211 | NULL, | |
212 | }; | |
213 | ||
214 | struct attribute_group nvdimm_attribute_group = { | |
215 | .attrs = nvdimm_attributes, | |
216 | }; | |
217 | EXPORT_SYMBOL_GPL(nvdimm_attribute_group); | |
218 | ||
e6dfb2de | 219 | struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, |
62232e45 DW |
220 | const struct attribute_group **groups, unsigned long flags, |
221 | unsigned long *dsm_mask) | |
e6dfb2de DW |
222 | { |
223 | struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL); | |
224 | struct device *dev; | |
225 | ||
226 | if (!nvdimm) | |
227 | return NULL; | |
228 | ||
229 | nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL); | |
230 | if (nvdimm->id < 0) { | |
231 | kfree(nvdimm); | |
232 | return NULL; | |
233 | } | |
234 | nvdimm->provider_data = provider_data; | |
235 | nvdimm->flags = flags; | |
62232e45 | 236 | nvdimm->dsm_mask = dsm_mask; |
eaf96153 | 237 | atomic_set(&nvdimm->busy, 0); |
e6dfb2de DW |
238 | dev = &nvdimm->dev; |
239 | dev_set_name(dev, "nmem%d", nvdimm->id); | |
240 | dev->parent = &nvdimm_bus->dev; | |
241 | dev->type = &nvdimm_device_type; | |
62232e45 | 242 | dev->devt = MKDEV(nvdimm_major, nvdimm->id); |
e6dfb2de | 243 | dev->groups = groups; |
4d88a97a | 244 | nd_device_register(dev); |
e6dfb2de DW |
245 | |
246 | return nvdimm; | |
247 | } | |
248 | EXPORT_SYMBOL_GPL(nvdimm_create); | |
4d88a97a | 249 | |
4a826c83 DW |
/*
 * Release a dpa reservation made by nvdimm_allocate_dpa().  The resource
 * name was kmemdup()'d at allocation time, so ownership of that string
 * returns here and it is freed before the region itself.  Callers must
 * hold the nvdimm bus lock.
 */
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}
256 | ||
257 | struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd, | |
258 | struct nd_label_id *label_id, resource_size_t start, | |
259 | resource_size_t n) | |
260 | { | |
261 | char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL); | |
262 | struct resource *res; | |
263 | ||
264 | if (!name) | |
265 | return NULL; | |
266 | ||
267 | WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev)); | |
268 | res = __request_region(&ndd->dpa, start, n, name, 0); | |
269 | if (!res) | |
270 | kfree(name); | |
271 | return res; | |
272 | } | |
273 | ||
4d88a97a DW |
/* device_for_each_child() callback: tally nvdimm children into *c */
static int count_dimms(struct device *dev, void *c)
{
	int *tally = c;

	if (is_nvdimm(dev))
		*tally += 1;
	return 0;
}
282 | ||
283 | int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count) | |
284 | { | |
285 | int count = 0; | |
286 | /* Flush any possible dimm registration failures */ | |
287 | nd_synchronize(); | |
288 | ||
289 | device_for_each_child(&nvdimm_bus->dev, &count, count_dimms); | |
290 | dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count); | |
291 | if (count != dimm_count) | |
292 | return -ENXIO; | |
293 | return 0; | |
294 | } | |
295 | EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count); |