Commit | Line | Data |
---|---|---|
8fdcb170 DW |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright(c) 2021 Intel Corporation. All rights reserved. */ | |
3 | #include <linux/libnvdimm.h> | |
60b8f172 | 4 | #include <asm/unaligned.h> |
8fdcb170 DW |
5 | #include <linux/device.h> |
6 | #include <linux/module.h> | |
21083f51 DW |
7 | #include <linux/ndctl.h> |
8 | #include <linux/async.h> | |
8fdcb170 | 9 | #include <linux/slab.h> |
5161a55c | 10 | #include "cxlmem.h" |
8fdcb170 DW |
11 | #include "cxl.h" |
12 | ||
/*
 * Ordered workqueue for cxl nvdimm device arrival and departure
 * to coordinate bus rescans when a bridge arrives and trigger remove
 * operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

/* Mailbox commands reserved for kernel use while a cxl_nvdimm is bound */
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
21 | ||
5e2411ae | 22 | static void clear_exclusive(void *cxlds) |
12f3856a | 23 | { |
5e2411ae | 24 | clear_exclusive_cxl_commands(cxlds, exclusive_cmds); |
12f3856a DW |
25 | } |
26 | ||
21083f51 DW |
/* devm action: tear down the nvdimm registered in cxl_nvdimm_probe() */
static void unregister_nvdimm(void *_nvdimm)
{
	struct nvdimm *nvdimm = _nvdimm;

	nvdimm_delete(nvdimm);
}
31 | ||
21083f51 DW |
/*
 * Bind a cxl_nvdimm device: find its nvdimm bridge, reserve the
 * label-area commands for kernel use, and register an nvdimm on the
 * bridge's nvdimm_bus advertising the config-size/get/set command set.
 *
 * Returns 0 on success, -ENXIO when no bridge or bus is available, or
 * the errno from devm registration / -ENOMEM from nvdimm_create().
 */
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	unsigned long flags = 0, cmd_mask = 0;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nvdimm *nvdimm;
	int rc;

	/* Takes a bridge device reference; dropped via put_device() below */
	cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
	if (!cxl_nvb)
		return -ENXIO;

	/* Hold the bridge lock so ->nvdimm_bus cannot change underneath us */
	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Make the LSA commands kernel-exclusive while bound; the devm
	 * action undoes this automatically on unbind or on failure here.
	 */
	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
	if (rc)
		goto out;

	set_bit(NDD_LABELING, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
			       cmd_mask, 0, NULL);
	if (!nvdimm) {
		rc = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, nvdimm);
	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	device_unlock(&cxl_nvb->dev);
	/* Pairs with the reference taken by cxl_find_nvdimm_bridge() */
	put_device(&cxl_nvb->dev);

	return rc;
}
76 | ||
/* cxl_nvdimm driver: attaches an nvdimm to the bridge's nvdimm_bus */
static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};
82 | ||
5e2411ae | 83 | static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds, |
60b8f172 DW |
84 | struct nd_cmd_get_config_size *cmd, |
85 | unsigned int buf_len) | |
86 | { | |
87 | if (sizeof(*cmd) > buf_len) | |
88 | return -EINVAL; | |
89 | ||
90 | *cmd = (struct nd_cmd_get_config_size) { | |
5e2411ae IW |
91 | .config_size = cxlds->lsa_size, |
92 | .max_xfer = cxlds->payload_size, | |
60b8f172 DW |
93 | }; |
94 | ||
95 | return 0; | |
96 | } | |
97 | ||
5e2411ae | 98 | static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds, |
60b8f172 DW |
99 | struct nd_cmd_get_config_data_hdr *cmd, |
100 | unsigned int buf_len) | |
101 | { | |
49be6dd8 | 102 | struct cxl_mbox_get_lsa get_lsa; |
60b8f172 DW |
103 | int rc; |
104 | ||
105 | if (sizeof(*cmd) > buf_len) | |
106 | return -EINVAL; | |
107 | if (struct_size(cmd, out_buf, cmd->in_length) > buf_len) | |
108 | return -EINVAL; | |
109 | ||
110 | get_lsa = (struct cxl_mbox_get_lsa) { | |
111 | .offset = cmd->in_offset, | |
112 | .length = cmd->in_length, | |
113 | }; | |
114 | ||
5e2411ae IW |
115 | rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa, |
116 | sizeof(get_lsa), cmd->out_buf, cmd->in_length); | |
60b8f172 DW |
117 | cmd->status = 0; |
118 | ||
119 | return rc; | |
120 | } | |
121 | ||
5e2411ae | 122 | static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds, |
60b8f172 DW |
123 | struct nd_cmd_set_config_hdr *cmd, |
124 | unsigned int buf_len) | |
125 | { | |
49be6dd8 | 126 | struct cxl_mbox_set_lsa *set_lsa; |
60b8f172 DW |
127 | int rc; |
128 | ||
129 | if (sizeof(*cmd) > buf_len) | |
130 | return -EINVAL; | |
131 | ||
132 | /* 4-byte status follows the input data in the payload */ | |
133 | if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len) | |
134 | return -EINVAL; | |
135 | ||
136 | set_lsa = | |
137 | kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL); | |
138 | if (!set_lsa) | |
139 | return -ENOMEM; | |
140 | ||
141 | *set_lsa = (struct cxl_mbox_set_lsa) { | |
142 | .offset = cmd->in_offset, | |
143 | }; | |
144 | memcpy(set_lsa->data, cmd->in_buf, cmd->in_length); | |
145 | ||
5e2411ae IW |
146 | rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa, |
147 | struct_size(set_lsa, data, cmd->in_length), | |
148 | NULL, 0); | |
60b8f172 DW |
149 | |
150 | /* | |
151 | * Set "firmware" status (4-packed bytes at the end of the input | |
152 | * payload. | |
153 | */ | |
154 | put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]); | |
155 | kvfree(set_lsa); | |
156 | ||
157 | return rc; | |
158 | } | |
159 | ||
160 | static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, | |
161 | void *buf, unsigned int buf_len) | |
162 | { | |
163 | struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); | |
164 | unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm); | |
165 | struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; | |
5e2411ae | 166 | struct cxl_dev_state *cxlds = cxlmd->cxlds; |
60b8f172 DW |
167 | |
168 | if (!test_bit(cmd, &cmd_mask)) | |
169 | return -ENOTTY; | |
170 | ||
171 | switch (cmd) { | |
172 | case ND_CMD_GET_CONFIG_SIZE: | |
5e2411ae | 173 | return cxl_pmem_get_config_size(cxlds, buf, buf_len); |
60b8f172 | 174 | case ND_CMD_GET_CONFIG_DATA: |
5e2411ae | 175 | return cxl_pmem_get_config_data(cxlds, buf, buf_len); |
60b8f172 | 176 | case ND_CMD_SET_CONFIG_DATA: |
5e2411ae | 177 | return cxl_pmem_set_config_data(cxlds, buf, buf_len); |
60b8f172 DW |
178 | default: |
179 | return -ENOTTY; | |
180 | } | |
181 | } | |
182 | ||
8fdcb170 DW |
/*
 * Bus-level ->ndctl() entry point: only dimm-targeted commands are
 * supported; bus-scoped calls (NULL @nvdimm) are rejected.
 */
static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}
197 | ||
198 | static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb) | |
199 | { | |
200 | if (cxl_nvb->nvdimm_bus) | |
201 | return true; | |
202 | cxl_nvb->nvdimm_bus = | |
203 | nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc); | |
204 | return cxl_nvb->nvdimm_bus != NULL; | |
205 | } | |
206 | ||
/* bus_for_each_dev() callback: unbind every cxl_nvdimm device found */
static int cxl_nvdimm_release_driver(struct device *dev, void *data)
{
	if (is_cxl_nvdimm(dev))
		device_release_driver(dev);

	return 0;
}
214 | ||
/* Quiesce bound cxl_nvdimm devices, then unregister @nvdimm_bus */
static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 *
	 * NOTE(review): this walks the whole cxl_bus_type and releases
	 * every cxl_nvdimm, not just those registered on @nvdimm_bus —
	 * presumably acceptable with a single bridge; confirm before
	 * supporting multiple bridges.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}
228 | ||
/*
 * Workqueue handler that realizes the bridge's requested state: bring
 * the nvdimm_bus online (and rescan the CXL bus so waiting cxl_nvdimm
 * devices can probe), or tear the bus down when the bridge goes
 * offline/dead.
 */
static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	/* Decide under the lock; perform side effects after unlocking */
	device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else
			rescan = true;
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		/* Detach the bus under the lock, unregister it unlocked */
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(victim_bus);

	/* Pairs with get_device() in cxl_nvdimm_bridge_state_work() */
	put_device(&cxl_nvb->dev);
}
267 | ||
08b9e0ab DW |
/* Queue cxl_nvb_update_state() to process the bridge's new ->state */
static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
	/*
	 * Take a reference that the workqueue will drop if new work
	 * gets queued.
	 */
	get_device(&cxl_nvb->dev);
	if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		put_device(&cxl_nvb->dev);
}
278 | ||
8fdcb170 DW |
/* Driver ->remove(): transition an online bridge to offline via the wq */
static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	/* A CXL_NVB_DEAD bridge stays dead; only demote the online state */
	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);
}
287 | ||
/*
 * Driver ->probe(): on first probe (CXL_NVB_NEW) initialize the nvdimm
 * bus descriptor and the state work, then queue the transition to
 * CXL_NVB_ONLINE. A bridge marked CXL_NVB_DEAD can never re-probe.
 */
static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		/* Must be initialized before the first state_work queueing */
		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);

	return 0;
}
310 | ||
/* cxl_nvdimm_bridge driver: manages the nvdimm_bus lifecycle */
static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
317 | ||
53989fad DW |
318 | /* |
319 | * Return all bridges to the CXL_NVB_NEW state to invalidate any | |
320 | * ->state_work referring to the now destroyed cxl_pmem_wq. | |
321 | */ | |
322 | static int cxl_nvdimm_bridge_reset(struct device *dev, void *data) | |
323 | { | |
324 | struct cxl_nvdimm_bridge *cxl_nvb; | |
325 | ||
326 | if (!is_cxl_nvdimm_bridge(dev)) | |
327 | return 0; | |
328 | ||
329 | cxl_nvb = to_cxl_nvdimm_bridge(dev); | |
38a34e10 | 330 | device_lock(dev); |
53989fad | 331 | cxl_nvb->state = CXL_NVB_NEW; |
38a34e10 | 332 | device_unlock(dev); |
53989fad DW |
333 | |
334 | return 0; | |
335 | } | |
336 | ||
/* Drain/destroy the workqueue, then invalidate stale bridge state work */
static void destroy_cxl_pmem_wq(void)
{
	destroy_workqueue(cxl_pmem_wq);
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}
342 | ||
8fdcb170 DW |
/*
 * Module init: mark the LSA commands kernel-exclusive, create the
 * ordered workqueue, and register the bridge then nvdimm drivers,
 * unwinding in reverse on failure.
 */
static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	return 0;

err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_cxl_pmem_wq();
	return rc;
}
370 | ||
/* Module exit: unregister drivers in reverse registration order */
static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_cxl_pmem_wq();
}
377 | ||
MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
/* Autoload on CXL nvdimm-bridge or nvdimm device arrival */
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);