// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"

#define CXL_RCRB_SIZE	SZ_8K
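
/*
 * Per-root-decoder copy of the CEDT CXIMS xormaps, consumed by the XOR
 * interleave arithmetic host bridge selection below.
 */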
struct cxl_cxims_data {
	int nr_maps;
	u64 xormaps[] __counted_by(nr_maps);
};

static const guid_t acpi_cxl_qtg_id_guid =
	GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
		  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);

/*
 * Find a target's entry (n) in the host bridge interleave list.
 * CXL Specification 3.0 Table 9-22
 */
static int cxl_xor_calc_n(u64 hpa, struct cxl_cxims_data *cximsd, int iw,
			  int ig)
{
	int i = 0, n = 0;
	u8 eiw;

	/* IW: 2,4,6,8,12,16 begin building 'n' using xormaps */
	if (iw != 3) {
		for (i = 0; i < cximsd->nr_maps; i++)
			n |= (hweight64(hpa & cximsd->xormaps[i]) & 1) << i;
	}
	/* IW: 3,6,12 add a modulo calculation to 'n' */
	if (!is_power_of_2(iw)) {
		if (ways_to_eiw(iw, &eiw))
			return -1;
		hpa &= GENMASK_ULL(51, eiw + ig);
		n |= do_div(hpa, 3) << i;
	}
	return n;
}
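
/*
 * XOR interleave arithmetic callback: translate a position in the
 * interleave into the host bridge dport backing it, using the xormaps
 * cached for this root decoder.
 */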
static struct cxl_dport *cxl_hb_xor(struct cxl_root_decoder *cxlrd, int pos)
{
	struct cxl_cxims_data *cximsd = cxlrd->platform_data;
	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
	struct cxl_decoder *cxld = &cxlsd->cxld;
	int ig = cxld->interleave_granularity;
	int iw = cxld->interleave_ways;
	int n = 0;
	u64 hpa;

	if (dev_WARN_ONCE(&cxld->dev,
			  cxld->interleave_ways != cxlsd->nr_targets,
			  "misconfigured root decoder\n"))
		return NULL;

	hpa = cxlrd->res->start + pos * ig;

	/* Entry (n) is 0 for no interleave (iw == 1) */
	if (iw != 1)
		n = cxl_xor_calc_n(hpa, cximsd, iw, ig);

	if (n < 0)
		return NULL;

	return cxlrd->cxlsd.target[n];
}

struct cxl_cxims_context {
	struct device *dev;
	struct cxl_root_decoder *cxlrd;
};
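
/*
 * CEDT CXIMS sub-table callback: cache the xormaps of the entry whose
 * host bridge interleave granularity matches the CXL window being parsed.
 */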
static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
	struct cxl_cxims_context *ctx = arg;
	struct cxl_root_decoder *cxlrd = ctx->cxlrd;
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *dev = ctx->dev;
	struct cxl_cxims_data *cximsd;
	unsigned int hbig, nr_maps;
	int rc;

	rc = eig_to_granularity(cxims->hbig, &hbig);
	if (rc)
		return rc;

	/* Does this CXIMS entry apply to the given CXL Window? */
	if (hbig != cxld->interleave_granularity)
		return 0;

	/* IW 1,3 do not use xormaps and skip this parsing entirely */
	if (is_power_of_2(cxld->interleave_ways))
		/* 2, 4, 8, 16 way */
		nr_maps = ilog2(cxld->interleave_ways);
	else
		/* 6, 12 way */
		nr_maps = ilog2(cxld->interleave_ways / 3);

	if (cxims->nr_xormaps < nr_maps) {
		dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
			cxims->nr_xormaps, nr_maps);
		return -ENXIO;
	}

	cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
			      GFP_KERNEL);
	if (!cximsd)
		return -ENOMEM;
	cximsd->nr_maps = nr_maps;
	memcpy(cximsd->xormaps, cxims->xormap_list,
	       nr_maps * sizeof(*cximsd->xormaps));
	cxlrd->platform_data = cximsd;

	return 0;
}
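
/* Translate CFMWS window restrictions into CXL decoder flags */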
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = CXL_DECODER_F_ENABLE;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
				 struct acpi_cedt_cfmws *cfmws)
{
	int rc, expected_len;
	unsigned int ways;

	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
	    cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
			cfmws->interleave_arithmetic);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
		return -EINVAL;
	}

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc) {
		dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
			cfmws->interleave_ways);
		return -EINVAL;
	}

	expected_len = struct_size(cfmws, interleave_targets, ways);

	if (cfmws->header.length < expected_len) {
		dev_err(dev, "CFMWS length %d less than expected %d\n",
			cfmws->header.length, expected_len);
		return -EINVAL;
	}

	if (cfmws->header.length > expected_len)
		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
			cfmws->header.length, expected_len);

	return 0;
}

/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
	struct device *dev;
	struct cxl_port *root_port;
	struct resource *cxl_res;
	int id;
};

/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue QTG _DSM with accompanied bandwidth and latency data in order to get
 * the QTG IDs that are suitable for the performance point in order of most
 * suitable to least suitable. Write back array of QTG IDs and return the
 * actual number of QTG IDs written back.
 */
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
			  int entries, int *qos_class)
{
	union acpi_object *out_obj, *out_buf, *obj;
	union acpi_object in_array[4] = {
		[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
		[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
		[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
		[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
	};
	union acpi_object in_obj = {
		.package = {
			.type = ACPI_TYPE_PACKAGE,
			.count = 4,
			.elements = in_array,
		},
	};
	int count, pkg_entries, i;
	u16 max_qtg;
	int rc;

	if (!coord)
		return -EINVAL;

	out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
	if (!out_obj)
		return -ENXIO;

	if (out_obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	/* Check Max QTG ID */
	obj = &out_obj->package.elements[0];
	if (obj->type != ACPI_TYPE_INTEGER) {
		rc = -ENXIO;
		goto out;
	}
	max_qtg = obj->integer.value;

	/* It's legal to have 0 QTG entries */
	pkg_entries = out_obj->package.count;
	if (pkg_entries <= 1) {
		rc = 0;
		goto out;
	}

	/* Retrieve QTG IDs package */
	obj = &out_obj->package.elements[1];
	if (obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	pkg_entries = obj->package.count;
	count = min(entries, pkg_entries);
	for (i = 0; i < count; i++) {
		u16 qtg_id;

		out_buf = &obj->package.elements[i];
		if (out_buf->type != ACPI_TYPE_INTEGER) {
			rc = -ENXIO;
			goto out;
		}

		qtg_id = out_buf->integer.value;
		if (qtg_id > max_qtg)
			pr_warn("QTG ID %u greater than MAX %u\n",
				qtg_id, max_qtg);

		qos_class[i] = qtg_id;
	}
	rc = count;

out:
	ACPI_FREE(out_obj);
	return rc;
}
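
/*
 * cxl_root_ops->qos_class callback: resolve the ACPI handle of the root
 * platform device and evaluate the QTG _DSM for the given coordinates.
 */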
static int cxl_acpi_qos_class(struct cxl_port *root_port,
			      struct access_coordinate *coord, int entries,
			      int *qos_class)
{
	acpi_handle handle;
	struct device *dev;

	dev = root_port->uport_dev;
	if (!dev_is_platform(dev))
		return -ENODEV;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -ENODEV;

	return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}

static const struct cxl_root_ops acpi_root_ops = {
	.qos_class = cxl_acpi_qos_class,
};
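
/*
 * CEDT CFMWS sub-table callback: publish each CXL fixed memory window as a
 * root decoder and track its host physical address range.
 */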
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	int target_map[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_cfmws_context *ctx = arg;
	struct cxl_port *root_port = ctx->root_port;
	struct resource *cxl_res = ctx->cxl_res;
	struct cxl_cxims_context cxims_ctx;
	struct cxl_root_decoder *cxlrd;
	struct device *dev = ctx->dev;
	struct acpi_cedt_cfmws *cfmws;
	cxl_calc_hb_fn cxl_calc_hb;
	struct cxl_decoder *cxld;
	unsigned int ways, i, ig;
	struct resource *res;
	int rc;

	cfmws = (struct acpi_cedt_cfmws *) header;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc) {
		dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);
		return 0;
	}

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc)
		return rc;
	rc = eig_to_granularity(cfmws->granularity, &ig);
	if (rc)
		return rc;
	for (i = 0; i < ways; i++)
		target_map[i] = cfmws->interleave_targets[i];

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
	if (!res->name)
		goto err_name;

	res->start = cfmws->base_hpa;
	res->end = cfmws->base_hpa + cfmws->window_size - 1;
	res->flags = IORESOURCE_MEM;

	/* add to the local resource tracking to establish a sort order */
	rc = insert_resource(cxl_res, res);
	if (rc)
		goto err_insert;

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO)
		cxl_calc_hb = cxl_hb_modulo;
	else
		cxl_calc_hb = cxl_hb_xor;

	cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
	if (IS_ERR(cxlrd))
		return 0;

	cxld = &cxlrd->cxlsd.cxld;
	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	cxld->interleave_ways = ways;
	/*
	 * Minimize the x1 granularity to advertise support for any
	 * valid region granularity
	 */
	if (ways == 1)
		ig = CXL_DECODER_MIN_GRANULARITY;
	cxld->interleave_granularity = ig;

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		if (ways != 1 && ways != 3) {
			cxims_ctx = (struct cxl_cxims_context) {
				.dev = dev,
				.cxlrd = cxlrd,
			};
			rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
						   cxl_parse_cxims, &cxims_ctx);
			if (rc < 0)
				goto err_xormap;
			if (!cxlrd->platform_data) {
				dev_err(dev, "No CXIMS for HBIG %u\n", ig);
				rc = -EINVAL;
				goto err_xormap;
			}
		}
	}

	cxlrd->qos_class = cfmws->qtg_id;

	rc = cxl_decoder_add(cxld, target_map);
err_xormap:
	if (rc)
		put_device(&cxld->dev);
	else
		rc = cxl_decoder_autoremove(dev, cxld);
	if (rc) {
		dev_err(dev, "Failed to add decode range: %pr", res);
		return rc;
	}
	dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
		dev_name(&cxld->dev),
		phys_to_target_node(cxld->hpa_range.start),
		cxld->hpa_range.start, cxld->hpa_range.end);

	return 0;

err_insert:
	kfree(res->name);
err_name:
	kfree(res);
	return -ENOMEM;
}
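
/* A CXL host bridge is an ACPI0016 device with a PCI host bridge companion */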
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	if (!acpi_pci_find_root(adev->handle))
		return NULL;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
		return adev;
	return NULL;
}

/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;
	resource_size_t base;
	u32 cxl_version;
};
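
/*
 * CEDT CHBS sub-table callback: record the CXL version and component
 * register (or RCRB) base for the host bridge matching @uid.
 */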
static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
			     const unsigned long end)
{
	struct cxl_chbs_context *ctx = arg;
	struct acpi_cedt_chbs *chbs;

	if (ctx->base != CXL_RESOURCE_NONE)
		return 0;

	chbs = (struct acpi_cedt_chbs *) header;

	if (ctx->uid != chbs->uid)
		return 0;

	ctx->cxl_version = chbs->cxl_version;
	if (!chbs->base)
		return 0;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
	    chbs->length != CXL_RCRB_SIZE)
		return 0;

	ctx->base = chbs->base;

	return 0;
}

static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
			struct cxl_chbs_context *ctx)
{
	unsigned long long uid;
	int rc;

	rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
	if (rc != AE_OK) {
		dev_err(dev, "unable to retrieve _UID\n");
		return -ENOENT;
	}

	dev_dbg(dev, "UID found: %lld\n", uid);
	*ctx = (struct cxl_chbs_context) {
		.dev = dev,
		.uid = uid,
		.base = CXL_RESOURCE_NONE,
		.cxl_version = UINT_MAX,
	};

	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);

	return 0;
}
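
/* Register each CXL host bridge as a dport of the ACPI0017 root port */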
static int add_host_bridge_dport(struct device *match, void *arg)
{
	int rc;
	struct device *bridge;
	struct cxl_dport *dport;
	struct cxl_chbs_context ctx;
	struct acpi_pci_root *pci_root;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);

	if (!hb)
		return 0;

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == UINT_MAX) {
		dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	if (ctx.base == CXL_RESOURCE_NONE) {
		dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;

	/*
	 * In RCH mode, bind the component regs base to the dport. In
	 * VH mode it will be bound to the CXL host bridge's port
	 * object later in add_host_bridge_uport().
	 */
	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
			&ctx.base);
		dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
					       ctx.base);
	} else {
		dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
					   CXL_RESOURCE_NONE);
	}

	if (IS_ERR(dport))
		return PTR_ERR(dport);

	return 0;
}

/*
 * A host bridge is a dport to a CFMWS decode and it is a uport to the
 * dport (PCIe Root Ports) in the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_dport *dport;
	struct cxl_port *port;
	struct device *bridge;
	struct cxl_chbs_context ctx;
	resource_size_t component_reg_phys;
	int rc;

	if (!hb)
		return 0;

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;
	dport = cxl_find_dport_by_dev(root_port, bridge);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	if (dport->rch) {
		dev_info(bridge, "host supports CXL (restricted)\n");
		return 0;
	}

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_warn(bridge,
			 "CXL CHBS version mismatch, skip port registration\n");
		return 0;
	}

	component_reg_phys = ctx.base;
	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
			ctx.uid, &component_reg_phys);

	rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
	if (rc)
		return rc;

	port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
	if (IS_ERR(port))
		return PTR_ERR(port);

	dev_info(bridge, "host supports CXL\n");

	return 0;
}

static int add_root_nvdimm_bridge(struct device *match, void *data)
{
	struct cxl_decoder *cxld;
	struct cxl_port *root_port = data;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *host = root_port->dev.parent;

	if (!is_root_decoder(match))
		return 0;

	cxld = to_cxl_decoder(match);
	if (!(cxld->flags & CXL_DECODER_F_PMEM))
		return 0;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb)) {
		dev_dbg(host, "failed to register pmem\n");
		return PTR_ERR(cxl_nvb);
	}
	dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
		dev_name(&cxl_nvb->dev));
	return 1;
}

static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
	device_lock_reset_class(dev);
}

static void del_cxl_resource(struct resource *res)
{
	kfree(res->name);
	kfree(res);
}

static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
	priv->desc = (unsigned long) pub;
}

static struct resource *cxl_get_public_resource(struct resource *priv)
{
	return (struct resource *) priv->desc;
}
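
/*
 * Remove both the private CXL window resources and the public
 * iomem_resource entries they were paired with by add_cxl_resources().
 */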
static void remove_cxl_resources(void *data)
{
	struct resource *res, *next, *cxl = data;

	for (res = cxl->child; res; res = next) {
		struct resource *victim = cxl_get_public_resource(res);

		next = res->sibling;
		remove_resource(res);

		if (victim) {
			remove_resource(victim);
			kfree(victim);
		}

		del_cxl_resource(res);
	}
}

/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
 * entry and require the window to be truncated or trimmed. Consider this
 * situation:
 *
 *	|-- "CXL Window 0" --||----- "CXL Window 1" -----|
 *	|--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established a System RAM resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "CXL Window 0" will span the entirety of the
 * "System RAM" span, and "CXL Window 1" is truncated to the remaining tail
 * past the end of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
	struct resource *res, *new, *next;

	for (res = cxl_res->child; res; res = next) {
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->name = res->name;
		new->start = res->start;
		new->end = res->end;
		new->flags = IORESOURCE_MEM;
		new->desc = IORES_DESC_CXL;

		/*
		 * Record the public resource in the private cxl_res tree for
		 * later removal.
		 */
		cxl_set_public_resource(res, new);

		insert_resource_expand_to_fit(&iomem_resource, new);

		next = res->sibling;
		while (next && resource_overlaps(new, next)) {
			if (resource_contains(new, next)) {
				struct resource *_next = next->sibling;

				remove_resource(next);
				del_cxl_resource(next);
				next = _next;
			} else
				next->start = new->end + 1;
		}
	}
	return 0;
}
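
/* Point each root decoder at the iomem_resource entry covering its HPA range */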
static int pair_cxl_resource(struct device *dev, void *data)
{
	struct resource *cxl_res = data;
	struct resource *p;

	if (!is_root_decoder(dev))
		return 0;

	for (p = cxl_res->child; p; p = p->sibling) {
		struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
		struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
		struct resource res = {
			.start = cxld->hpa_range.start,
			.end = cxld->hpa_range.end,
			.flags = IORESOURCE_MEM,
		};

		if (resource_contains(p, &res)) {
			cxlrd->res = cxl_get_public_resource(p);
			break;
		}
	}

	return 0;
}

static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct resource *cxl_res;
	struct cxl_root *cxl_root;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	device_lock_set_class(&pdev->dev, &cxl_root_key);
	rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
				      &pdev->dev);
	if (rc)
		return rc;

	cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
	if (!cxl_res)
		return -ENOMEM;
	cxl_res->name = "CXL mem";
	cxl_res->start = 0;
	cxl_res->end = -1;
	cxl_res->flags = IORESOURCE_MEM;

	cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
	if (IS_ERR(cxl_root))
		return PTR_ERR(cxl_root);
	root_port = &cxl_root->port;

	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
	if (rc)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
		.cxl_res = cxl_res,
	};
	rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
	if (rc < 0)
		return -ENXIO;

	rc = add_cxl_resources(cxl_res);
	if (rc)
		return rc;

	/*
	 * Populate the root decoders with their related iomem resource,
	 * if present
	 */
	device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

	/*
	 * Root level scanned with host-bridge as dports, now scan host-bridges
	 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	/* In case PCI is scanned before ACPI re-trigger memdev attach */
	cxl_bus_rescan();
	return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};

static int __init cxl_acpi_init(void)
{
	return platform_driver_register(&cxl_acpi_driver);
}

static void __exit cxl_acpi_exit(void)
{
	platform_driver_unregister(&cxl_acpi_driver);
	cxl_bus_drain();
}

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);
module_exit(cxl_acpi_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);