// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "cxlmem.h"
#include "cxlpci.h"
10 | ||
/**
 * DOC: cxl mem
 *
 * CXL memory endpoint devices and switches are CXL capable devices that are
 * participating in CXL.mem protocol. Their functionality builds on top of the
 * CXL.io protocol that allows enumerating and configuring components via
 * standard PCI mechanisms.
 *
 * The cxl_mem driver owns kicking off the enumeration of this CXL.mem
 * capability. With the detection of a CXL capable endpoint, the driver will
 * walk up to find the platform specific port it is connected to, and determine
 * if there are intervening switches in the path. If there are switches, a
 * secondary action is to enumerate those (implemented in cxl_core). Finally the
 * cxl_mem driver adds the device it is bound to as a CXL endpoint-port for use
 * in higher level operations.
 */
27 | ||
/*
 * Devres teardown action: drop the suspend-blocking reference that
 * cxl_mem_probe() took via cxl_mem_active_inc().
 */
static void enable_suspend(void *data)
{
	cxl_mem_active_dec();
}
32 | ||
/* Devres teardown action: tear down the per-device debugfs directory. */
static void remove_debugfs(void *dentry)
{
	debugfs_remove_recursive(dentry);
}
37 | ||
38 | static int cxl_mem_dpa_show(struct seq_file *file, void *data) | |
39 | { | |
40 | struct device *dev = file->private; | |
41 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); | |
42 | ||
43 | cxl_dpa_debug(file, cxlmd->cxlds); | |
44 | ||
45 | return 0; | |
46 | } | |
47 | ||
0a19bfc8 | 48 | static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd, |
7592d935 DW |
49 | struct cxl_dport *parent_dport) |
50 | { | |
51 | struct cxl_port *parent_port = parent_dport->port; | |
52 | struct cxl_dev_state *cxlds = cxlmd->cxlds; | |
53 | struct cxl_port *endpoint, *iter, *down; | |
0a19bfc8 | 54 | resource_size_t component_reg_phys; |
7592d935 DW |
55 | int rc; |
56 | ||
57 | /* | |
58 | * Now that the path to the root is established record all the | |
59 | * intervening ports in the chain. | |
60 | */ | |
61 | for (iter = parent_port, down = NULL; !is_cxl_root(iter); | |
62 | down = iter, iter = to_cxl_port(iter->dev.parent)) { | |
63 | struct cxl_ep *ep; | |
64 | ||
65 | ep = cxl_ep_load(iter, cxlmd); | |
66 | ep->next = down; | |
67 | } | |
68 | ||
0a19bfc8 DW |
69 | /* |
70 | * The component registers for an RCD might come from the | |
71 | * host-bridge RCRB if they are not already mapped via the | |
72 | * typical register locator mechanism. | |
73 | */ | |
74 | if (parent_dport->rch && cxlds->component_reg_phys == CXL_RESOURCE_NONE) | |
75 | component_reg_phys = cxl_rcrb_to_component( | |
76 | &cxlmd->dev, parent_dport->rcrb, CXL_RCRB_UPSTREAM); | |
77 | else | |
78 | component_reg_phys = cxlds->component_reg_phys; | |
79 | endpoint = devm_cxl_add_port(host, &cxlmd->dev, component_reg_phys, | |
80 | parent_dport); | |
7592d935 DW |
81 | if (IS_ERR(endpoint)) |
82 | return PTR_ERR(endpoint); | |
83 | ||
84 | rc = cxl_endpoint_autoremove(cxlmd, endpoint); | |
85 | if (rc) | |
86 | return rc; | |
87 | ||
88 | if (!endpoint->dev.driver) { | |
89 | dev_err(&cxlmd->dev, "%s failed probe\n", | |
90 | dev_name(&endpoint->dev)); | |
91 | return -ENXIO; | |
92 | } | |
93 | ||
94 | return 0; | |
95 | } | |
96 | ||
/*
 * Bind the cxl_mem driver to a memdev: set up debugfs, enumerate the port
 * hierarchy up to the root, register this device as an endpoint-port, and
 * optionally surface a PMEM bridge. Blocks suspend while active.
 */
static int cxl_mem_probe(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *endpoint_parent;
	struct cxl_port *parent_port;
	struct cxl_dport *dport;
	struct dentry *dentry;
	int rc;

	/*
	 * Someone is trying to reattach this device after it lost its port
	 * connection (an endpoint port previously registered by this memdev was
	 * disabled). This racy check is ok because if the port is still gone,
	 * no harm done, and if the port hierarchy comes back it will re-trigger
	 * this probe. Port rescan and memdev detach work share the same
	 * single-threaded workqueue.
	 */
	if (work_pending(&cxlmd->detach_work))
		return -EBUSY;

	dentry = cxl_debugfs_create_dir(dev_name(dev));
	debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
	rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
	if (rc)
		return rc;

	rc = devm_cxl_enumerate_ports(cxlmd);
	if (rc)
		return rc;

	parent_port = cxl_mem_find_port(cxlmd, &dport);
	if (!parent_port) {
		dev_err(dev, "CXL port topology not found\n");
		return -ENXIO;
	}

	/* For an RCH the endpoint hangs off the host-bridge uport directly. */
	if (dport->rch)
		endpoint_parent = parent_port->uport;
	else
		endpoint_parent = &parent_port->dev;

	/* Hold the parent locked so its driver cannot detach mid-registration. */
	device_lock(endpoint_parent);
	if (!endpoint_parent->driver) {
		dev_err(dev, "CXL port topology %s not enabled\n",
			dev_name(endpoint_parent));
		rc = -ENXIO;
		goto out_unlock;
	}

	rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
out_unlock:
	device_unlock(endpoint_parent);
	put_device(&parent_port->dev); /* drop cxl_mem_find_port() reference */
	if (rc)
		return rc;

	/*
	 * NOTE(review): on devm_cxl_add_nvdimm() success this returns 0
	 * without reaching cxl_mem_active_inc() below — presumably
	 * intentional for PMEM-capable devices, but worth confirming
	 * against the suspend-blocking design.
	 */
	if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
		rc = devm_cxl_add_nvdimm(cxlmd);
		if (rc == -ENODEV)
			dev_info(dev, "PMEM disabled by platform\n");
		else
			return rc;
	}

	/*
	 * The kernel may be operating out of CXL memory on this device,
	 * there is no spec defined way to determine whether this device
	 * preserves contents over suspend, and there is no simple way
	 * to arrange for the suspend image to avoid CXL memory which
	 * would setup a circular dependency between PCI resume and save
	 * state restoration.
	 *
	 * TODO: support suspend when all the regions this device is
	 * hosting are locked and covered by the system address map,
	 * i.e. platform firmware owns restoring the HDM configuration
	 * that it locked.
	 */
	cxl_mem_active_inc();
	return devm_add_action_or_reset(dev, enable_suspend, NULL);
}
178 | ||
179 | static struct cxl_driver cxl_mem_driver = { | |
180 | .name = "cxl_mem", | |
181 | .probe = cxl_mem_probe, | |
182 | .id = CXL_DEVICE_MEMORY_EXPANDER, | |
183 | }; | |
184 | ||
185 | module_cxl_driver(cxl_mem_driver); | |
186 | ||
187 | MODULE_LICENSE("GPL v2"); | |
188 | MODULE_IMPORT_NS(CXL); | |
189 | MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER); | |
190 | /* | |
191 | * create_endpoint() wants to validate port driver attach immediately after | |
192 | * endpoint registration. | |
193 | */ | |
194 | MODULE_SOFTDEP("pre: cxl_port"); |