/*
 * Source: drivers/cxl/port.c (linux-block.git, commit 54cdbf84)
 */
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3#include <linux/device.h>
4#include <linux/module.h>
5#include <linux/slab.h>
6
7#include "cxlmem.h"
8#include "cxlpci.h"
9
10/**
11 * DOC: cxl port
12 *
13 * The port driver enumerates dports via PCI and scans for HDM
14 * (Host-managed-Device-Memory) decoder resources via the
15 * @component_reg_phys value passed in by the agent that registered the
16 * port. All descendant ports of a CXL root port (described by platform
17 * firmware) are managed in this driver's context. Each driver instance
18 * is responsible for tearing down the driver context of immediate
19 * descendant ports. The locking for this is validated by
20 * CONFIG_PROVE_CXL_LOCKING.
21 *
22 * The primary service this driver provides is presenting APIs to other
23 * drivers to utilize the decoders, and indicating to userspace (via bind
24 * status) the connectivity of the CXL.mem protocol throughout the
25 * PCIe topology.
26 */
27
8dd2bc0f
BW
/*
 * devm action callback: queue asynchronous detach of the memdev that
 * registered this endpoint port. @cxlmd is a struct cxl_memdev *.
 */
static void schedule_detach(void *cxlmd)
{
	schedule_cxl_memdev_detach(cxlmd);
}
32
a32320b7
DW
33static int discover_region(struct device *dev, void *root)
34{
35 struct cxl_endpoint_decoder *cxled;
36 int rc;
37
38 if (!is_endpoint_decoder(dev))
39 return 0;
40
41 cxled = to_cxl_endpoint_decoder(dev);
42 if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
43 return 0;
44
45 if (cxled->state != CXL_DECODER_STATE_AUTO)
46 return 0;
47
48 /*
49 * Region enumeration is opportunistic, if this add-event fails,
50 * continue to the next endpoint decoder.
51 */
52 rc = cxl_add_to_region(root, cxled);
53 if (rc)
54 dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
55 cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
56
57 return 0;
58}
59
32ce3f18 60static int cxl_switch_port_probe(struct cxl_port *port)
54cdbf84 61{
54cdbf84
BW
62 struct cxl_hdm *cxlhdm;
63 int rc;
64
32ce3f18
DW
65 rc = devm_cxl_port_enumerate_dports(port);
66 if (rc < 0)
67 return rc;
fcfbc93c 68
32ce3f18
DW
69 if (rc == 1)
70 return devm_cxl_add_passthrough_decoder(port);
fcfbc93c 71
a5fcd228 72 cxlhdm = devm_cxl_setup_hdm(port, NULL);
fcfbc93c
DW
73 if (IS_ERR(cxlhdm))
74 return PTR_ERR(cxlhdm);
75
a5fcd228 76 return devm_cxl_enumerate_decoders(cxlhdm, NULL);
32ce3f18 77}
8dd2bc0f 78
32ce3f18
DW
79static int cxl_endpoint_port_probe(struct cxl_port *port)
80{
b70c2cf9 81 struct cxl_endpoint_dvsec_info info = { .port = port };
32ce3f18
DW
82 struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
83 struct cxl_dev_state *cxlds = cxlmd->cxlds;
84 struct cxl_hdm *cxlhdm;
a32320b7 85 struct cxl_port *root;
32ce3f18 86 int rc;
c9700604 87
a5fcd228
DW
88 rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
89 if (rc < 0)
90 return rc;
fcfbc93c 91
4474ce56 92 cxlhdm = devm_cxl_setup_hdm(port, &info);
32ce3f18
DW
93 if (IS_ERR(cxlhdm))
94 return PTR_ERR(cxlhdm);
5e5f4ad5 95
32ce3f18
DW
96 /* Cache the data early to ensure is_visible() works */
97 read_cdat_data(port);
5e5f4ad5 98
32ce3f18
DW
99 get_device(&cxlmd->dev);
100 rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
101 if (rc)
102 return rc;
8dd2bc0f 103
a5fcd228 104 rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
32ce3f18
DW
105 if (rc)
106 return rc;
107
108 rc = cxl_await_media_ready(cxlds);
54cdbf84 109 if (rc) {
32ce3f18 110 dev_err(&port->dev, "Media not active (%d)\n", rc);
54cdbf84
BW
111 return rc;
112 }
113
b777e9be 114 rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
a32320b7
DW
115 if (rc)
116 return rc;
117
118 /*
119 * This can't fail in practice as CXL root exit unregisters all
120 * descendant ports and that in turn synchronizes with cxl_port_probe()
121 */
d35b495d 122 root = find_cxl_root(port);
a32320b7
DW
123
124 /*
125 * Now that all endpoint decoders are successfully enumerated, try to
126 * assemble regions from committed decoders
127 */
128 device_for_each_child(&port->dev, root, discover_region);
129 put_device(&root->dev);
130
131 return 0;
32ce3f18
DW
132}
133
/* Driver probe entry: dispatch on endpoint vs. switch port flavor */
static int cxl_port_probe(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);

	return is_cxl_endpoint(port) ? cxl_endpoint_port_probe(port) :
				       cxl_switch_port_probe(port);
}
142
c9700604
IW
143static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
144 struct bin_attribute *bin_attr, char *buf,
145 loff_t offset, size_t count)
146{
147 struct device *dev = kobj_to_dev(kobj);
148 struct cxl_port *port = to_cxl_port(dev);
149
150 if (!port->cdat_available)
151 return -ENXIO;
152
153 if (!port->cdat.table)
154 return 0;
155
156 return memory_read_from_buffer(buf, count, &offset,
157 port->cdat.table,
158 port->cdat.length);
159}
160
161static BIN_ATTR_ADMIN_RO(CDAT, 0);
162
163static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
164 struct bin_attribute *attr, int i)
165{
166 struct device *dev = kobj_to_dev(kobj);
167 struct cxl_port *port = to_cxl_port(dev);
168
169 if ((attr == &bin_attr_CDAT) && port->cdat_available)
170 return attr->attr.mode;
171
172 return 0;
173}
174
175static struct bin_attribute *cxl_cdat_bin_attributes[] = {
176 &bin_attr_CDAT,
177 NULL,
178};
179
180static struct attribute_group cxl_cdat_attribute_group = {
181 .bin_attrs = cxl_cdat_bin_attributes,
182 .is_bin_visible = cxl_port_bin_attr_is_visible,
183};
184
185static const struct attribute_group *cxl_port_attribute_groups[] = {
186 &cxl_cdat_attribute_group,
187 NULL,
188};
189
54cdbf84
BW
190static struct cxl_driver cxl_port_driver = {
191 .name = "cxl_port",
192 .probe = cxl_port_probe,
193 .id = CXL_DEVICE_PORT,
c9700604
IW
194 .drv = {
195 .dev_groups = cxl_port_attribute_groups,
196 },
54cdbf84
BW
197};
198
199module_cxl_driver(cxl_port_driver);
200MODULE_LICENSE("GPL v2");
201MODULE_IMPORT_NS(CXL);
202MODULE_ALIAS_CXL(CXL_DEVICE_PORT);