// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "cxlmem.h"
#include "cxlpci.h"

/**
 * DOC: cxl mem
 *
 * CXL memory endpoint devices and switches are CXL capable devices that are
 * participating in CXL.mem protocol. Their functionality builds on top of the
 * CXL.io protocol that allows enumerating and configuring components via
 * standard PCI mechanisms.
 *
 * The cxl_mem driver owns kicking off the enumeration of this CXL.mem
 * capability. With the detection of a CXL capable endpoint, the driver will
 * walk up to find the platform specific port it is connected to, and determine
 * if there are intervening switches in the path. If there are switches, a
 * secondary action is to enumerate those (implemented in cxl_core). Finally the
 * cxl_mem driver adds the device it is bound to as a CXL endpoint-port for use
 * in higher level operations.
 */
9ea4dcf4 DW |
28 | static void enable_suspend(void *data) |
29 | { | |
30 | cxl_mem_active_dec(); | |
31 | } | |
32 | ||
cc2a4878 DW |
33 | static void remove_debugfs(void *dentry) |
34 | { | |
35 | debugfs_remove_recursive(dentry); | |
36 | } | |
37 | ||
38 | static int cxl_mem_dpa_show(struct seq_file *file, void *data) | |
39 | { | |
40 | struct device *dev = file->private; | |
41 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); | |
42 | ||
43 | cxl_dpa_debug(file, cxlmd->cxlds); | |
44 | ||
45 | return 0; | |
46 | } | |
47 | ||
0a19bfc8 | 48 | static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd, |
7592d935 DW |
49 | struct cxl_dport *parent_dport) |
50 | { | |
51 | struct cxl_port *parent_port = parent_dport->port; | |
7592d935 DW |
52 | struct cxl_port *endpoint, *iter, *down; |
53 | int rc; | |
54 | ||
55 | /* | |
56 | * Now that the path to the root is established record all the | |
57 | * intervening ports in the chain. | |
58 | */ | |
59 | for (iter = parent_port, down = NULL; !is_cxl_root(iter); | |
60 | down = iter, iter = to_cxl_port(iter->dev.parent)) { | |
61 | struct cxl_ep *ep; | |
62 | ||
63 | ep = cxl_ep_load(iter, cxlmd); | |
64 | ep->next = down; | |
65 | } | |
66 | ||
8ce520fd RR |
67 | /* Note: endpoint port component registers are derived from @cxlds */ |
68 | endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE, | |
0a19bfc8 | 69 | parent_dport); |
7592d935 DW |
70 | if (IS_ERR(endpoint)) |
71 | return PTR_ERR(endpoint); | |
72 | ||
73 | rc = cxl_endpoint_autoremove(cxlmd, endpoint); | |
74 | if (rc) | |
75 | return rc; | |
76 | ||
77 | if (!endpoint->dev.driver) { | |
78 | dev_err(&cxlmd->dev, "%s failed probe\n", | |
79 | dev_name(&endpoint->dev)); | |
80 | return -ENXIO; | |
81 | } | |
82 | ||
83 | return 0; | |
84 | } | |
85 | ||
50d527f5 AS |
86 | static int cxl_debugfs_poison_inject(void *data, u64 dpa) |
87 | { | |
88 | struct cxl_memdev *cxlmd = data; | |
89 | ||
90 | return cxl_inject_poison(cxlmd, dpa); | |
91 | } | |
92 | ||
93 | DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL, | |
94 | cxl_debugfs_poison_inject, "%llx\n"); | |
95 | ||
96 | static int cxl_debugfs_poison_clear(void *data, u64 dpa) | |
97 | { | |
98 | struct cxl_memdev *cxlmd = data; | |
99 | ||
100 | return cxl_clear_poison(cxlmd, dpa); | |
101 | } | |
102 | ||
103 | DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL, | |
104 | cxl_debugfs_poison_clear, "%llx\n"); | |
105 | ||
8dd2bc0f BW |
106 | static int cxl_mem_probe(struct device *dev) |
107 | { | |
108 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); | |
59f8d151 | 109 | struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); |
f17b558d | 110 | struct cxl_dev_state *cxlds = cxlmd->cxlds; |
0a19bfc8 | 111 | struct device *endpoint_parent; |
8dd2bc0f | 112 | struct cxl_port *parent_port; |
1b58b4ca | 113 | struct cxl_dport *dport; |
cc2a4878 | 114 | struct dentry *dentry; |
8dd2bc0f BW |
115 | int rc; |
116 | ||
e764f122 DJ |
117 | if (!cxlds->media_ready) |
118 | return -EBUSY; | |
119 | ||
8dd2bc0f BW |
120 | /* |
121 | * Someone is trying to reattach this device after it lost its port | |
122 | * connection (an endpoint port previously registered by this memdev was | |
123 | * disabled). This racy check is ok because if the port is still gone, | |
124 | * no harm done, and if the port hierarchy comes back it will re-trigger | |
125 | * this probe. Port rescan and memdev detach work share the same | |
126 | * single-threaded workqueue. | |
127 | */ | |
128 | if (work_pending(&cxlmd->detach_work)) | |
129 | return -EBUSY; | |
130 | ||
cc2a4878 DW |
131 | dentry = cxl_debugfs_create_dir(dev_name(dev)); |
132 | debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show); | |
50d527f5 | 133 | |
59f8d151 | 134 | if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds)) |
50d527f5 AS |
135 | debugfs_create_file("inject_poison", 0200, dentry, cxlmd, |
136 | &cxl_poison_inject_fops); | |
59f8d151 | 137 | if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds)) |
50d527f5 AS |
138 | debugfs_create_file("clear_poison", 0200, dentry, cxlmd, |
139 | &cxl_poison_clear_fops); | |
140 | ||
cc2a4878 DW |
141 | rc = devm_add_action_or_reset(dev, remove_debugfs, dentry); |
142 | if (rc) | |
143 | return rc; | |
144 | ||
8dd2bc0f BW |
145 | rc = devm_cxl_enumerate_ports(cxlmd); |
146 | if (rc) | |
147 | return rc; | |
148 | ||
1b58b4ca | 149 | parent_port = cxl_mem_find_port(cxlmd, &dport); |
8dd2bc0f BW |
150 | if (!parent_port) { |
151 | dev_err(dev, "CXL port topology not found\n"); | |
152 | return -ENXIO; | |
153 | } | |
154 | ||
0a19bfc8 | 155 | if (dport->rch) |
7481653d | 156 | endpoint_parent = parent_port->uport_dev; |
0a19bfc8 DW |
157 | else |
158 | endpoint_parent = &parent_port->dev; | |
159 | ||
f05fd10d RR |
160 | cxl_setup_parent_dport(dev, dport); |
161 | ||
0a19bfc8 DW |
162 | device_lock(endpoint_parent); |
163 | if (!endpoint_parent->driver) { | |
8dd2bc0f | 164 | dev_err(dev, "CXL port topology %s not enabled\n", |
0a19bfc8 | 165 | dev_name(endpoint_parent)); |
8dd2bc0f | 166 | rc = -ENXIO; |
76a4121e | 167 | goto unlock; |
8dd2bc0f BW |
168 | } |
169 | ||
0a19bfc8 | 170 | rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport); |
76a4121e | 171 | unlock: |
0a19bfc8 | 172 | device_unlock(endpoint_parent); |
8dd2bc0f | 173 | put_device(&parent_port->dev); |
76a4121e DW |
174 | if (rc) |
175 | return rc; | |
9ea4dcf4 | 176 | |
f17b558d DW |
177 | if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) { |
178 | rc = devm_cxl_add_nvdimm(cxlmd); | |
179 | if (rc == -ENODEV) | |
180 | dev_info(dev, "PMEM disabled by platform\n"); | |
181 | else | |
182 | return rc; | |
183 | } | |
184 | ||
9ea4dcf4 DW |
185 | /* |
186 | * The kernel may be operating out of CXL memory on this device, | |
187 | * there is no spec defined way to determine whether this device | |
188 | * preserves contents over suspend, and there is no simple way | |
189 | * to arrange for the suspend image to avoid CXL memory which | |
190 | * would setup a circular dependency between PCI resume and save | |
191 | * state restoration. | |
192 | * | |
193 | * TODO: support suspend when all the regions this device is | |
194 | * hosting are locked and covered by the system address map, | |
195 | * i.e. platform firmware owns restoring the HDM configuration | |
196 | * that it locked. | |
197 | */ | |
198 | cxl_mem_active_inc(); | |
199 | return devm_add_action_or_reset(dev, enable_suspend, NULL); | |
8dd2bc0f BW |
200 | } |
201 | ||
7ff6ad10 AS |
202 | static ssize_t trigger_poison_list_store(struct device *dev, |
203 | struct device_attribute *attr, | |
204 | const char *buf, size_t len) | |
205 | { | |
206 | bool trigger; | |
207 | int rc; | |
208 | ||
209 | if (kstrtobool(buf, &trigger) || !trigger) | |
210 | return -EINVAL; | |
211 | ||
212 | rc = cxl_trigger_poison_list(to_cxl_memdev(dev)); | |
213 | ||
214 | return rc ? rc : len; | |
215 | } | |
216 | static DEVICE_ATTR_WO(trigger_poison_list); | |
217 | ||
42834b17 DJ |
218 | static ssize_t ram_qos_class_show(struct device *dev, |
219 | struct device_attribute *attr, char *buf) | |
220 | { | |
221 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); | |
222 | struct cxl_dev_state *cxlds = cxlmd->cxlds; | |
223 | struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); | |
224 | struct cxl_dpa_perf *dpa_perf; | |
225 | ||
226 | if (!dev->driver) | |
227 | return -ENOENT; | |
228 | ||
229 | if (list_empty(&mds->ram_perf_list)) | |
230 | return -ENOENT; | |
231 | ||
232 | dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf, | |
233 | list); | |
234 | ||
235 | return sysfs_emit(buf, "%d\n", dpa_perf->qos_class); | |
236 | } | |
237 | ||
238 | static struct device_attribute dev_attr_ram_qos_class = | |
239 | __ATTR(qos_class, 0444, ram_qos_class_show, NULL); | |
240 | ||
241 | static ssize_t pmem_qos_class_show(struct device *dev, | |
242 | struct device_attribute *attr, char *buf) | |
243 | { | |
244 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); | |
245 | struct cxl_dev_state *cxlds = cxlmd->cxlds; | |
246 | struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); | |
247 | struct cxl_dpa_perf *dpa_perf; | |
248 | ||
249 | if (!dev->driver) | |
250 | return -ENOENT; | |
251 | ||
252 | if (list_empty(&mds->pmem_perf_list)) | |
253 | return -ENOENT; | |
254 | ||
255 | dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf, | |
256 | list); | |
257 | ||
258 | return sysfs_emit(buf, "%d\n", dpa_perf->qos_class); | |
259 | } | |
260 | ||
261 | static struct device_attribute dev_attr_pmem_qos_class = | |
262 | __ATTR(qos_class, 0444, pmem_qos_class_show, NULL); | |
263 | ||
7ff6ad10 AS |
264 | static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n) |
265 | { | |
42834b17 DJ |
266 | struct device *dev = kobj_to_dev(kobj); |
267 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); | |
268 | struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); | |
7ff6ad10 | 269 | |
42834b17 | 270 | if (a == &dev_attr_trigger_poison_list.attr) |
7ff6ad10 | 271 | if (!test_bit(CXL_POISON_ENABLED_LIST, |
59f8d151 | 272 | mds->poison.enabled_cmds)) |
7ff6ad10 | 273 | return 0; |
42834b17 DJ |
274 | |
275 | if (a == &dev_attr_pmem_qos_class.attr) | |
276 | if (list_empty(&mds->pmem_perf_list)) | |
277 | return 0; | |
278 | ||
279 | if (a == &dev_attr_ram_qos_class.attr) | |
280 | if (list_empty(&mds->ram_perf_list)) | |
281 | return 0; | |
282 | ||
7ff6ad10 AS |
283 | return a->mode; |
284 | } | |
285 | ||
286 | static struct attribute *cxl_mem_attrs[] = { | |
287 | &dev_attr_trigger_poison_list.attr, | |
42834b17 DJ |
288 | &dev_attr_ram_qos_class.attr, |
289 | &dev_attr_pmem_qos_class.attr, | |
7ff6ad10 AS |
290 | NULL |
291 | }; | |
292 | ||
293 | static struct attribute_group cxl_mem_group = { | |
294 | .attrs = cxl_mem_attrs, | |
295 | .is_visible = cxl_mem_visible, | |
296 | }; | |
297 | ||
298 | __ATTRIBUTE_GROUPS(cxl_mem); | |
299 | ||
8dd2bc0f BW |
300 | static struct cxl_driver cxl_mem_driver = { |
301 | .name = "cxl_mem", | |
302 | .probe = cxl_mem_probe, | |
303 | .id = CXL_DEVICE_MEMORY_EXPANDER, | |
7ff6ad10 AS |
304 | .drv = { |
305 | .dev_groups = cxl_mem_groups, | |
306 | }, | |
8dd2bc0f BW |
307 | }; |
308 | ||
309 | module_cxl_driver(cxl_mem_driver); | |
310 | ||
311 | MODULE_LICENSE("GPL v2"); | |
312 | MODULE_IMPORT_NS(CXL); | |
313 | MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER); | |
314 | /* | |
315 | * create_endpoint() wants to validate port driver attach immediately after | |
316 | * endpoint registration. | |
317 | */ | |
318 | MODULE_SOFTDEP("pre: cxl_port"); |