cxl/mem: Add the cxl_mem driver
drivers/cxl/mem.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "cxlmem.h"
#include "cxlpci.h"

/**
 * DOC: cxl mem
 *
 * CXL memory endpoint devices and switches are CXL capable devices that
 * participate in the CXL.mem protocol. Their functionality builds on top of
 * the CXL.io protocol that allows enumerating and configuring components via
 * standard PCI mechanisms.
 *
 * The cxl_mem driver owns kicking off the enumeration of this CXL.mem
 * capability. Upon detecting a CXL capable endpoint, the driver walks up the
 * hierarchy to find the platform specific port it is connected to and
 * determines whether there are intervening switches in the path. If there are
 * switches, a secondary action is to enumerate those (implemented in
 * cxl_core). Finally, the cxl_mem driver adds the device it is bound to as a
 * CXL endpoint-port for use in higher level operations.
 */
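
/*
 * Illustrative example (not derived from this file): with one intervening
 * switch, the enumerated hierarchy for an endpoint roughly looks like:
 *
 *   platform root -> host bridge port -> switch port -> endpoint port (cxl_mem)
 */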

static int wait_for_media(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_endpoint_dvsec_info *info = &cxlds->info;
	int rc;

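	/*
	 * info->mem_enabled is presumably cached from the device's CXL DVSEC
	 * control during enumeration; without memory operation enabled there
	 * is no media to wait for.
	 */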
	if (!info->mem_enabled)
		return -EBUSY;

	rc = cxlds->wait_media_ready(cxlds);
	if (rc)
		return rc;

	/*
	 * The device is known to be active and enabled at this point. If any
	 * DVSEC ranges are non-zero they will need to be checked later, before
	 * adding the port, since the port owns the HDM decoder registers.
	 */
	return 0;
}

static int create_endpoint(struct cxl_memdev *cxlmd,
			   struct cxl_port *parent_port)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_port *endpoint;

	endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
				     cxlds->component_reg_phys, parent_port);
	if (IS_ERR(endpoint))
		return PTR_ERR(endpoint);

	dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));

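	/*
	 * The cxl_port driver is expected to have attached to the new endpoint
	 * port as part of registration (see the MODULE_SOFTDEP at the bottom
	 * of this file); without it the endpoint can not be used.
	 */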
	if (!endpoint->dev.driver) {
		dev_err(&cxlmd->dev, "%s failed probe\n",
			dev_name(&endpoint->dev));
		return -ENXIO;
	}

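	/*
	 * Presumably ties the endpoint port's lifetime to the memdev so that
	 * the endpoint is torn down automatically when the memdev goes away.
	 */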
	return cxl_endpoint_autoremove(cxlmd, endpoint);
}

/**
 * cxl_dvsec_decode_init() - Set up HDM decoding for the endpoint
 * @cxlds: Device state
 *
 * Additionally, enables global HDM decoding. Warning: don't call this outside
 * of probe. Once probe is complete, the port driver owns all access to the HDM
 * decoder registers.
 *
 * Returns: false if DVSEC Ranges are being used instead of HDM decoders, or if
 * it can not be determined whether DVSEC Ranges are in use. Otherwise, returns
 * true.
 */
__mock bool cxl_dvsec_decode_init(struct cxl_dev_state *cxlds)
{
	struct cxl_endpoint_dvsec_info *info = &cxlds->info;
	struct cxl_register_map map;
	struct cxl_component_reg_map *cmap = &map.component_map;
	bool global_enable, do_hdm_init = false;
	void __iomem *crb;
	u32 global_ctrl;

	/* Map the component register block to locate the HDM decoder registers */
	crb = ioremap(cxlds->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		dev_dbg(cxlds->dev, "Failed to map component registers\n");
		return false;
	}

	cxl_probe_component_regs(cxlds->dev, crb, cmap);
	if (!cmap->hdm_decoder.valid) {
		dev_dbg(cxlds->dev, "Invalid HDM decoder registers\n");
		goto out;
	}

	global_ctrl = readl(crb + cmap->hdm_decoder.offset +
			    CXL_HDM_DECODER_CTRL_OFFSET);
	global_enable = global_ctrl & CXL_HDM_DECODER_ENABLE;
	if (!global_enable && info->ranges) {
		dev_dbg(cxlds->dev,
			"DVSEC ranges already programmed and HDM decoders not enabled.\n");
		goto out;
	}

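	/*
	 * Reaching this point means either the HDM decoders are already
	 * globally enabled, or no DVSEC ranges are active, so it is safe to
	 * commit this device to HDM decoder operation.
	 */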
	do_hdm_init = true;

	/*
	 * Permanently (for this boot at least) opt the device into HDM
	 * operation. Individual HDM decoders still need to be enabled after
	 * this point.
	 */
	if (!global_enable) {
		dev_dbg(cxlds->dev, "Enabling HDM decode\n");
		writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
		       crb + cmap->hdm_decoder.offset +
		       CXL_HDM_DECODER_CTRL_OFFSET);
	}

out:
	iounmap(crb);
	return do_hdm_init;
}

static int cxl_mem_probe(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_port *parent_port;
	int rc;

	/*
	 * Someone is trying to reattach this device after it lost its port
	 * connection (an endpoint port previously registered by this memdev was
	 * disabled). This racy check is ok because if the port is still gone,
	 * no harm done, and if the port hierarchy comes back it will re-trigger
	 * this probe. Port rescan and memdev detach work share the same
	 * single-threaded workqueue.
	 */
	if (work_pending(&cxlmd->detach_work))
		return -EBUSY;

	rc = wait_for_media(cxlmd);
	if (rc) {
		dev_err(dev, "Media not active (%d)\n", rc);
		return rc;
	}

	/*
	 * If DVSEC ranges are being used instead of HDM decoder registers there
	 * is no use in trying to manage those.
	 */
	if (!cxl_dvsec_decode_init(cxlds)) {
		struct cxl_endpoint_dvsec_info *info = &cxlds->info;
		int i;

		/* The CXL device DVSEC publishes at most two memory ranges */
		for (i = 0; i < 2; i++) {
			u64 base, size;

			/*
			 * Give a nice warning to the user that BIOS has really
			 * botched things for them if it didn't place DVSEC
			 * ranges in the memory map.
			 */
			base = info->dvsec_range[i].start;
			size = range_len(&info->dvsec_range[i]);
			if (size && !region_intersects(base, size,
						       IORESOURCE_SYSTEM_RAM,
						       IORES_DESC_NONE)) {
				dev_err(dev,
					"DVSEC range %#llx-%#llx must be reserved by BIOS, but isn't\n",
					base, base + size - 1);
			}
		}
		dev_err(dev,
			"Active DVSEC range registers in use. Will not bind.\n");
		return -EBUSY;
	}

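	/*
	 * Have the core walk up from this endpoint and register ports for any
	 * intervening switches (see the DOC comment above) so that a connected
	 * path into the port hierarchy exists before adding the endpoint port.
	 */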
	rc = devm_cxl_enumerate_ports(cxlmd);
	if (rc)
		return rc;

	parent_port = cxl_mem_find_port(cxlmd);
	if (!parent_port) {
		dev_err(dev, "CXL port topology not found\n");
		return -ENXIO;
	}

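	/*
	 * Hold the parent port's device lock so the port can not be disabled
	 * or removed while the endpoint port is created beneath it.
	 */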
	cxl_device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_err(dev, "CXL port topology %s not enabled\n",
			dev_name(&parent_port->dev));
		rc = -ENXIO;
		goto out;
	}

	rc = create_endpoint(cxlmd, parent_port);
out:
	cxl_device_unlock(&parent_port->dev);
	put_device(&parent_port->dev);
	return rc;
}

static struct cxl_driver cxl_mem_driver = {
	.name = "cxl_mem",
	.probe = cxl_mem_probe,
	.id = CXL_DEVICE_MEMORY_EXPANDER,
};

module_cxl_driver(cxl_mem_driver);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER);
/*
 * create_endpoint() wants to validate port driver attach immediately after
 * endpoint registration.
 */
MODULE_SOFTDEP("pre: cxl_port");