// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "cxlmem.h"
#include "cxlpci.h"

/**
 * DOC: cxl port
 *
 * The port driver enumerates dports via PCI and scans for HDM
 * (Host-managed Device Memory) decoder resources via the
 * @component_reg_phys value passed in by the agent that registered the
 * port. All descendant ports of a CXL root port (described by platform
 * firmware) are managed in this driver's context. Each driver instance
 * is responsible for tearing down the driver context of immediate
 * descendant ports. The locking for this is validated by
 * CONFIG_PROVE_CXL_LOCKING.
 *
 * The primary service this driver provides is presenting APIs to other
 * drivers to utilize the decoders, and indicating to userspace (via bind
 * status) the connectivity of the CXL.mem protocol throughout the
 * PCIe topology.
 */

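/*
 * Adapter so that schedule_cxl_memdev_detach() can be registered as a
 * devm_add_action_or_reset() callback, which hands the memdev back as a
 * void pointer.
 */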
static void schedule_detach(void *cxlmd)
{
        schedule_cxl_memdev_detach(cxlmd);
}

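/*
 * device_for_each_child() callback: for each endpoint decoder that is
 * enabled and was set up by platform firmware (CXL_DECODER_STATE_AUTO),
 * attempt to attach it to a region. Always returns 0 so the walk covers
 * every child device.
 */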
static int discover_region(struct device *dev, void *unused)
{
        struct cxl_endpoint_decoder *cxled;
        int rc;

        if (!is_endpoint_decoder(dev))
                return 0;

        cxled = to_cxl_endpoint_decoder(dev);
        if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
                return 0;

        if (cxled->state != CXL_DECODER_STATE_AUTO)
                return 0;

        /*
         * Region enumeration is opportunistic; if this add-event fails,
         * continue to the next endpoint decoder.
         */
        rc = cxl_add_to_region(cxled);
        if (rc)
                dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
                        cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);

        return 0;
}

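/*
 * Probe a switch port (any port bound to this driver that is not an
 * endpoint): enumerate its downstream ports, parse CDAT data, and
 * enumerate HDM decoders. A port with a single dport and no HDM decoder
 * capability is modeled with a passthrough decoder instead.
 */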
static int cxl_switch_port_probe(struct cxl_port *port)
{
        struct cxl_hdm *cxlhdm;
        int rc;

        /* Cache the data early to ensure is_visible() works */
        read_cdat_data(port);

        rc = devm_cxl_port_enumerate_dports(port);
        if (rc < 0)
                return rc;

        cxl_switch_parse_cdat(port);

        cxlhdm = devm_cxl_setup_hdm(port, NULL);
        if (!IS_ERR(cxlhdm))
                return devm_cxl_enumerate_decoders(cxlhdm, NULL);

        if (PTR_ERR(cxlhdm) != -ENODEV) {
                dev_err(&port->dev, "Failed to map HDM decoder capability\n");
                return PTR_ERR(cxlhdm);
        }

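        /*
         * No HDM decoder capability was found. rc still holds the dport
         * count from devm_cxl_port_enumerate_dports(); a single-dport port
         * can fall back to an implicit passthrough decoder.
         */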
        if (rc == 1) {
                dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
                return devm_cxl_add_passthrough_decoder(port);
        }

        dev_err(&port->dev, "HDM decoder capability not found\n");
        return -ENXIO;
}

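/*
 * Probe an endpoint port: decode the DVSEC range registers, map and
 * enumerate the HDM decoders, parse CDAT data, and then opportunistically
 * assemble regions from decoders that were committed by firmware.
 */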
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
        struct cxl_endpoint_dvsec_info info = { .port = port };
        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_hdm *cxlhdm;
        int rc;

        rc = cxl_dvsec_rr_decode(cxlds, &info);
        if (rc < 0)
                return rc;

        cxlhdm = devm_cxl_setup_hdm(port, &info);
        if (IS_ERR(cxlhdm)) {
                if (PTR_ERR(cxlhdm) == -ENODEV)
                        dev_err(&port->dev, "HDM decoder registers not found\n");
                return PTR_ERR(cxlhdm);
        }

        /* Cache the data early to ensure is_visible() works */
        read_cdat_data(port);
        cxl_endpoint_parse_cdat(port);

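        /*
         * Take a reference on the memdev so that it stays alive for the
         * devm teardown action that schedules its detach.
         */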
        get_device(&cxlmd->dev);
        rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
        if (rc)
                return rc;

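        /*
         * Reconcile the DVSEC range register state decoded above with the
         * HDM decoder capability before enumerating decoders.
         */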
        rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
        if (rc)
                return rc;

        rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
        if (rc)
                return rc;

        /*
         * Now that all endpoint decoders are successfully enumerated, try to
         * assemble regions from committed decoders
         */
        device_for_each_child(&port->dev, NULL, discover_region);

        return 0;
}

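/* Route a port to the endpoint or switch flavor of probing */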
static int cxl_port_probe(struct device *dev)
{
        struct cxl_port *port = to_cxl_port(dev);

        if (is_cxl_endpoint(port))
                return cxl_endpoint_port_probe(port);
        return cxl_switch_port_probe(port);
}

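/*
 * sysfs binary attribute for dumping the port's cached CDAT table. Reads
 * return 0 bytes if CDAT was advertised but the table could not be read.
 */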
static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
                         const struct bin_attribute *bin_attr, char *buf,
                         loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_port *port = to_cxl_port(dev);

        if (!port->cdat_available)
                return -ENXIO;

        if (!port->cdat.table)
                return 0;

        return memory_read_from_buffer(buf, count, &offset,
                                       port->cdat.table,
                                       port->cdat.length);
}

static const BIN_ATTR_ADMIN_RO(CDAT, 0);

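/* Only expose the CDAT attribute when a CDAT table was found for the port */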
static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
                                            const struct bin_attribute *attr, int i)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_port *port = to_cxl_port(dev);

        if ((attr == &bin_attr_CDAT) && port->cdat_available)
                return attr->attr.mode;

        return 0;
}

static const struct bin_attribute *const cxl_cdat_bin_attributes[] = {
        &bin_attr_CDAT,
        NULL,
};

static const struct attribute_group cxl_cdat_attribute_group = {
        .bin_attrs_new = cxl_cdat_bin_attributes,
        .is_bin_visible = cxl_port_bin_attr_is_visible,
};

static const struct attribute_group *cxl_port_attribute_groups[] = {
        &cxl_cdat_attribute_group,
        NULL,
};

static struct cxl_driver cxl_port_driver = {
        .name = "cxl_port",
        .probe = cxl_port_probe,
        .id = CXL_DEVICE_PORT,
        .drv = {
                .dev_groups = cxl_port_attribute_groups,
        },
};

static int __init cxl_port_init(void)
{
        return cxl_driver_register(&cxl_port_driver);
}
/*
 * Be ready to immediately enable ports emitted by the platform CXL root
 * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
 */
subsys_initcall(cxl_port_init);

static void __exit cxl_port_exit(void)
{
        cxl_driver_unregister(&cxl_port_driver);
}
module_exit(cxl_port_exit);

MODULE_DESCRIPTION("CXL: Port enumeration and services");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);