// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */
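/*
 * Editorial sketch (not part of the upstream file): assuming a root decoder
 * named decoder0.0 and endpoint decoders decoder2.0/decoder3.0, the ordering
 * above corresponds to a sysfs sequence along these lines:
 *
 *   echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *   echo 256 > /sys/bus/cxl/devices/region0/interleave_granularity
 *   echo 2 > /sys/bus/cxl/devices/region0/interleave_ways
 *   uuidgen > /sys/bus/cxl/devices/region0/uuid
 *   echo $((512 << 20)) > /sys/bus/cxl/devices/region0/size
 *   echo decoder2.0 > /sys/bus/cxl/devices/region0/target0
 *   echo decoder3.0 > /sys/bus/cxl/devices/region0/target1
 *   echo 1 > /sys/bus/cxl/devices/region0/commit
 */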
static struct cxl_region *to_cxl_region(struct device *dev);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxlr->mode != CXL_DECODER_PMEM)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}

static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}

static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);
static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}

static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
			dev_info_once(
				&cxlr->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			return 0;
		}
		dev_err(&cxlr->dev,
			"Failed to synchronize CPU cache state\n");
		return -ENXIO;
	}

	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
	return 0;
}

static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	/*
	 * Before region teardown attempt to flush, and if the flush
	 * fails cancel the region teardown for data consistency
	 * concerns
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		return rc;

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct cxl_ep *ep;

		if (cxlds->rcd)
			goto endpoint_reset;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			if (cxld->reset)
				rc = cxld->reset(cxld);
			if (rc)
				return rc;
			set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
		}

endpoint_reset:
		rc = cxled->cxld.reset(&cxled->cxld);
		if (rc)
			return rc;
		set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
	}

	/* all decoders associated with this region have been torn down */
	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

	return 0;
}
static int commit_decoder(struct cxl_decoder *cxld)
{
	struct cxl_switch_decoder *cxlsd = NULL;

	if (cxld->commit)
		return cxld->commit(cxld);

	if (is_switch_decoder(&cxld->dev))
		cxlsd = to_cxl_switch_decoder(&cxld->dev);

	if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
			  "->commit() is required\n"))
		return -ENXIO;
	return 0;
}

static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = commit_decoder(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				if (cxld->reset)
					cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}
static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Invalidate caches before region setup to drop any speculative
	 * consumption of this address space
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		goto out;

	if (commit) {
		rc = cxl_region_decode_commit(cxlr);
		if (rc == 0)
			p->state = CXL_CONFIG_COMMIT;
		goto out;
	}

	p->state = CXL_CONFIG_RESET_PENDING;
	up_write(&cxl_region_rwsem);
	device_release_driver(&cxlr->dev);
	down_write(&cxl_region_rwsem);

	/*
	 * The lock was dropped, so need to revalidate that the reset is
	 * still pending.
	 */
	if (p->state == CXL_CONFIG_RESET_PENDING) {
		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
		/*
		 * Revert to committed since there may still be active
		 * decoders associated with this region, or move forward
		 * to active to mark the reset successful
		 */
		if (rc)
			p->state = CXL_CONFIG_COMMIT;
		else
			p->state = CXL_CONFIG_ACTIVE;
	}

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}

static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);
static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	/*
	 * Support tooling that expects to find a 'uuid' attribute for all
	 * regions regardless of mode.
	 */
	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0444;
	return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);
static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_eiw(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x6, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
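	/*
	 * Editorial example: behind a x2 host bridge a region may be x2, x4,
	 * x8, ...; behind a x3 host bridge it may be x3, x6, or x12, but not
	 * x9 (a multiple, though not a power-of-2 one) and not x4 (not a
	 * multiple of 3 at all).
	 */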
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}
static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_eig(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in invalid
	 * DPA translations (invalid to support).
	 */
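	/*
	 * Editorial example: with a x2 root decoder at 256 byte granularity,
	 * a region granularity of 128 would need two endpoints to fill each
	 * 256 byte root slot, and a granularity of 512 would produce invalid
	 * DPA translations, so only 256 is accepted.
	 */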
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);
static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u64 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
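	/*
	 * Editorial example: capacity must be a 256MB-per-way multiple, so a
	 * x2 region may be 512M, 1G, 1.5G, ..., while a 768M request leaves
	 * a 256M remainder and is rejected just below.
	 */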
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev,
			"HPA allocation error (%ld) for size:%pap in %s %pr\n",
			PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res);
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}

static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		/*
		 * Autodiscovered regions may not have been able to insert their
		 * resource.
		 */
		if (p->res->parent)
			remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}

static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}
static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);

static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};
static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}

static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}

static int match_auto_decoder(struct device *dev, void *data)
{
	struct cxl_region_params *p = data;
	struct cxl_decoder *cxld;
	struct range *r;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	r = &cxld->hpa_range;

	if (p->res && p->res->start == r->start && p->res->end == r->end)
		return 1;

	return 0;
}
static struct cxl_decoder *
cxl_region_find_decoder(struct cxl_port *port,
			struct cxl_endpoint_decoder *cxled,
			struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	if (port == cxled_to_port(cxled))
		return &cxled->cxld;

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		dev = device_find_child(&port->dev, &cxlr->params,
					match_auto_decoder);
	else
		dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder stays registered as long as the endpoint decoder is
	 * registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}
static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
			  struct cxl_decoder *cxld)
{
	struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
	struct cxl_decoder *cxld_iter = rr->decoder;

	/*
	 * Allow the out of order assembly of auto-discovered regions.
	 * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
	 * in HPA order. Confirm that the decoder with the lesser HPA
	 * starting address has the lesser id.
	 */
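	/*
	 * Editorial example: if region A maps a lower HPA than region B at
	 * this port, then A must hold the lower-id decoder, since decoder
	 * commit order follows decoder instance order.
	 */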
	dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
		dev_name(&cxld->dev), cxld->id,
		dev_name(&cxld_iter->dev), cxld_iter->id);

	if (cxld_iter->id > cxld->id)
		return true;

	return false;
}

static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
		 struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (!ip->res || ip->res->start < p->res->start)
			continue;

		if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
			struct cxl_decoder *cxld;

			cxld = cxl_region_find_decoder(port, cxled, cxlr);
			if (auto_order_ok(port, iter->region, cxld))
				continue;
		}
		dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
			dev_name(&port->dev),
			dev_name(&iter->region->dev), ip->res, p->res);

		return ERR_PTR(-EBUSY);
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}
static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	if (!cxld)
		return;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;

	cxl_rr_free_decoder(cxl_rr);
	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}

static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
		cxl_rr->nr_eps++;
	}

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}

static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
				struct cxl_endpoint_decoder *cxled,
				struct cxl_region_ref *cxl_rr)
{
	struct cxl_decoder *cxld;

	cxld = cxl_region_find_decoder(port, cxled, cxlr);
	if (!cxld) {
		dev_dbg(&cxlr->dev, "%s: no decoder available\n",
			dev_name(&port->dev));
		return -EBUSY;
	}

	if (cxld->region) {
		dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
			dev_name(&port->dev), dev_name(&cxld->dev),
			dev_name(&cxld->region->dev));
		return -EBUSY;
	}

	/*
	 * Endpoints should already match the region type, but backstop that
	 * assumption with an assertion. Switch-decoders change mapping-type
	 * based on what is mapped when they are assigned to a region.
	 */
	dev_WARN_ONCE(&cxlr->dev,
		      port == cxled_to_port(cxled) &&
			      cxld->target_type != cxlr->type,
		      "%s:%s mismatch decoder type %d -> %d\n",
		      dev_name(&cxled_to_memdev(cxled)->dev),
		      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
	cxld->target_type = cxlr->type;
	cxl_rr->decoder = cxld;
	return 0;
}
/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on
 *     @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction. I.e. endpoints that share common
		 * upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}
	} else {
		cxl_rr = alloc_region_ref(port, cxlr, cxled);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
		if (rc)
			goto out_erase;
	}
	cxld = cxl_rr->decoder;

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport_dev) :
				      dev_name(&cxlmd->dev) :
			   "none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}
static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}

static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint mapped
	 * then that endpoint, at index 'position - distance', must also be
	 * mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}
static int cxl_port_setup_targets(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_switch_decoder *cxlsd;
	u16 eig, peig;
	u8 eiw, peiw;

	/*
	 * While root level decoders support x3, x6, x12, switch level
	 * decoders only support powers of 2 up to x16.
	 */
	if (!is_power_of_2(cxl_rr->nr_targets)) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			cxl_rr->nr_targets);
		return -EINVAL;
	}

	cxlsd = to_cxl_switch_decoder(&cxld->dev);
	if (cxl_rr->nr_targets_set) {
		int i, distance;

		/*
		 * Passthrough decoders impose no distance requirements between
		 * peers
		 */
		if (cxl_rr->nr_targets == 1)
			distance = 0;
		else
			distance = p->nr_targets / cxl_rr->nr_targets;
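		/*
		 * Editorial example: for a x4 region routed through a port
		 * with 2 targets, distance is 4 / 2 = 2, i.e. endpoints at
		 * positions N and N - 2 must share a dport.
		 */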
		for (i = 0; i < cxl_rr->nr_targets_set; i++)
			if (ep->dport == cxlsd->target[i]) {
				rc = check_last_peer(cxled, ep, cxl_rr,
						     distance);
				if (rc)
					return rc;
				goto out_target_set;
			}
		goto add_target;
	}

	if (is_cxl_root(parent_port)) {
		/*
		 * Root decoder IG is always set to value in CFMWS which
		 * may be different than this region's IG. We can use the
		 * region's IG here since interleave_granularity_store()
		 * does not allow interleaved host-bridges with
		 * root IG != region IG.
		 */
		parent_ig = p->interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for
		 * switch ports.
		 */
		if (!is_power_of_2(parent_iw))
			parent_iw /= 3;
	} else {
		struct cxl_region_ref *parent_rr;
		struct cxl_decoder *parent_cxld;

		parent_rr = cxl_rr_load(parent_port, cxlr);
		parent_cxld = parent_rr->decoder;
		parent_ig = parent_cxld->interleave_granularity;
		parent_iw = parent_cxld->interleave_ways;
	}

	rc = granularity_to_eig(parent_ig, &peig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_ig);
		return rc;
	}

	rc = ways_to_eiw(parent_iw, &peiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_iw);
		return rc;
	}

	iw = cxl_rr->nr_targets;
	rc = ways_to_eiw(iw, &eiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev), iw);
		return rc;
	}

	/*
	 * Interleave granularity is a multiple of @parent_port granularity.
	 * Multiplier is the parent port interleave ways.
	 */
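	/*
	 * Editorial example: a parent interleaving x2 at 256 bytes hands each
	 * target every other 256 byte chunk, so this port decodes at a
	 * 256 * 2 = 512 byte granularity.
	 */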
	rc = granularity_to_eig(parent_ig * parent_iw, &eig);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: invalid granularity calculation (%d * %d)\n",
			dev_name(&parent_port->dev), parent_ig, parent_iw);
		return rc;
	}

	rc = eig_to_granularity(eig, &ig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			256 << eig);
		return rc;
	}

	if (iw > 8 || iw > cxlsd->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s:%s: ways: %d overflows targets: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxld->dev), iw, cxlsd->nr_targets);
		return -ENXIO;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxld->interleave_ways != iw ||
		    cxld->interleave_granularity != ig ||
		    cxld->hpa_range.start != p->res->start ||
		    cxld->hpa_range.end != p->res->end ||
		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
			dev_err(&cxlr->dev,
				"%s:%s %s expected iw: %d ig: %d %pr\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, iw, ig, p->res);
			dev_err(&cxlr->dev,
				"%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, cxld->interleave_ways,
				cxld->interleave_granularity,
				(cxld->flags & CXL_DECODER_F_ENABLE) ?
					"enabled" :
					"disabled",
				cxld->hpa_range.start, cxld->hpa_range.end);
			return -ENXIO;
		}
	} else {
		cxld->interleave_ways = iw;
		cxld->interleave_granularity = ig;
		cxld->hpa_range = (struct range) {
			.start = p->res->start,
			.end = p->res->end,
		};
	}
	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
		dev_name(&port->dev), iw, ig);
add_target:
	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s: targets full trying to add %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
			dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				dev_name(&cxlsd->cxld.dev),
				dev_name(ep->dport->dport_dev),
				cxl_rr->nr_targets_set);
			return -ENXIO;
		}
	} else
		cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
	inc = 1;
out_target_set:
	cxl_rr->nr_targets_set += inc;
	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);

	return 0;
}
static void cxl_port_reset_targets(struct cxl_port *port,
				   struct cxl_region *cxlr)
{
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_decoder *cxld;

	/*
	 * After the last endpoint has been detached the entire cxl_rr may now
	 * be gone.
	 */
	if (!cxl_rr)
		return;
	cxl_rr->nr_targets_set = 0;

	cxld = cxl_rr->decoder;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};
}

static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i;

	/*
	 * In the auto-discovery case skip automatic teardown since the
	 * address space is already active
	 */
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		return;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		if (cxlds->rcd)
			continue;

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
			cxl_port_reset_targets(iter, cxlr);
	}
}

static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	int i, rc, rch = 0, vh = 0;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		/* validate that all targets agree on topology */
		if (!cxlds->rcd) {
			vh++;
		} else {
			rch++;
			continue;
		}

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		/*
		 * Descend the topology tree programming / validating
		 * targets while looking for conflicts.
		 */
		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			rc = cxl_port_setup_targets(iter, cxlr, cxled);
			if (rc) {
				cxl_region_teardown_targets(cxlr);
				return rc;
			}
		}
	}

	if (rch && vh) {
		dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
		cxl_region_teardown_targets(cxlr);
		return -ENXIO;
	}

	return 0;
}
static int cxl_region_validate_position(struct cxl_region *cxlr,
					struct cxl_endpoint_decoder *cxled,
					int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	if (pos < 0 || pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		return -ENXIO;
	}

	if (p->targets[pos] == cxled)
		return 0;

	if (p->targets[pos]) {
		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);

		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
			pos, dev_name(&cxlmd_target->dev),
			dev_name(&cxled_target->cxld.dev));
		return -EBUSY;
	}

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd) {
			dev_dbg(&cxlr->dev,
				"%s already specified at position %d via: %s\n",
				dev_name(&cxlmd->dev), pos,
				dev_name(&cxled_target->cxld.dev));
			return -EBUSY;
		}
	}

	return 0;
}

static int cxl_region_attach_position(struct cxl_region *cxlr,
				      struct cxl_root_decoder *cxlrd,
				      struct cxl_endpoint_decoder *cxled,
				      const struct cxl_dport *dport, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *iter;
	int rc;

	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(&cxlrd->cxlsd.cxld.dev));
		return -ENXIO;
	}

	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
		if (rc)
			goto err;
	}

	return 0;

err:
	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);
	return rc;
}
static int cxl_region_attach_auto(struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_region_params *p = &cxlr->params;

	if (cxled->state != CXL_DECODER_STATE_AUTO) {
		dev_err(&cxlr->dev,
			"%s: unable to add decoder to autodetected region\n",
			dev_name(&cxled->cxld.dev));
		return -EINVAL;
	}

	if (pos >= 0) {
		dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
			dev_name(&cxled->cxld.dev), pos);
		return -EINVAL;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_err(&cxlr->dev, "%s: no more target slots available\n",
			dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	/*
	 * Temporarily record the endpoint decoder into the target array. Yes,
	 * this means that userspace can view devices in the wrong position
	 * before the region activates, and must be careful to understand when
	 * it might be racing region autodiscovery.
	 */
	pos = p->nr_targets;
	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	return 0;
}

static int cmp_interleave_pos(const void *a, const void *b)
{
	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;

	return cxled_a->pos - cxled_b->pos;
}

static struct cxl_port *next_port(struct cxl_port *port)
{
	if (!port->parent_dport)
		return NULL;
	return port->parent_dport->port;
}

static int match_switch_decoder_by_range(struct device *dev, void *data)
{
	struct cxl_switch_decoder *cxlsd;
	struct range *r1, *r2 = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxlsd = to_cxl_switch_decoder(dev);
	r1 = &cxlsd->cxld.hpa_range;

	if (is_root_decoder(dev))
		return range_contains(r1, r2);
	return (r1->start == r2->start && r1->end == r2->end);
}
static int find_pos_and_ways(struct cxl_port *port, struct range *range,
			     int *pos, int *ways)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *parent;
	struct device *dev;
	int rc = -ENXIO;

	parent = next_port(port);
	if (!parent)
		return rc;

	dev = device_find_child(&parent->dev, range,
				match_switch_decoder_by_range);
	if (!dev) {
		dev_err(port->uport_dev,
			"failed to find decoder mapping %#llx-%#llx\n",
			range->start, range->end);
		return rc;
	}
	cxlsd = to_cxl_switch_decoder(dev);
	*ways = cxlsd->cxld.interleave_ways;

	for (int i = 0; i < *ways; i++) {
		if (cxlsd->target[i] == port->parent_dport) {
			*pos = i;
			rc = 0;
			break;
		}
	}
	put_device(dev);

	return rc;
}
/**
 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
 * @cxled: endpoint decoder member of given region
 *
 * The endpoint position is calculated by traversing the topology from
 * the endpoint to the root decoder and iteratively applying this
 * calculation:
 *
 *    position = position * parent_ways + parent_pos;
 *
 * ...where @position is inferred from switch and root decoder target lists.
 *
 * Return: position >= 0 on success
 *	   -ENXIO on failure
 */
static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *port = cxled_to_port(cxled);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *range = &cxled->cxld.hpa_range;
	int parent_ways = 0, parent_pos = 0, pos = 0;
	int rc;

	/*
	 * Example: the expected interleave order of the 4-way region shown
	 * below is: mem0, mem2, mem1, mem3
	 *
	 *                 root_port
	 *                /         \
	 *      host_bridge_0      host_bridge_1
	 *        |       |          |       |
	 *       mem0    mem1       mem2    mem3
	 *
	 * In the example the calculator will iterate twice. The first iteration
	 * uses the mem position in the host-bridge and the ways of the host-
	 * bridge to generate the first, or local, position. The second
	 * iteration uses the host-bridge position in the root_port and the ways
	 * of the root_port to refine the position.
	 *
	 * A trace of the calculation per endpoint looks like this:
	 * mem0: pos = 0 * 2 + 0    mem2: pos = 0 * 2 + 0
	 *       pos = 0 * 2 + 0          pos = 0 * 2 + 1
	 *       pos: 0                   pos: 1
	 *
	 * mem1: pos = 0 * 2 + 1    mem3: pos = 0 * 2 + 1
	 *       pos = 1 * 2 + 0          pos = 1 * 2 + 1
	 *       pos: 2                   pos: 3
	 *
	 * Note that while this example is simple, the method applies to more
	 * complex topologies, including those with switches.
	 */

	/* Iterate from endpoint to root_port refining the position */
	for (iter = port; iter; iter = next_port(iter)) {
		if (is_cxl_root(iter))
			break;

		rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
		if (rc)
			return rc;

		pos = pos * parent_ways + parent_pos;
	}

	dev_dbg(&cxlmd->dev,
		"decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
		dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
		dev_name(&port->dev), range->start, range->end, pos);

	return pos;
}
static int cxl_region_sort_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		cxled->pos = cxl_calc_interleave_pos(cxled);
		/*
		 * Record that sorting failed, but still continue to calc
		 * cxled->pos so that follow-on code paths can reliably
		 * do p->targets[cxled->pos] to self-reference their entry.
		 */
		if (cxled->pos < 0)
			rc = -ENXIO;
	}
	/* Keep the cxlr target list in interleave position order */
	sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
	     cmp_interleave_pos, NULL);

	dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
	return rc;
}
static int cxl_region_attach(struct cxl_region *cxlr,
			     struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_port *ep_port, *root_port;
	struct cxl_dport *dport;
	int rc = -ENXIO;

	if (cxled->mode != cxlr->mode) {
		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
		return -EINVAL;
	}

	if (cxled->mode == CXL_DECODER_DEAD) {
		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
		return -ENODEV;
	}

	/* all full of members, or interleave config not established? */
	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "region already active\n");
		return -EBUSY;
	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "interleave config missing\n");
		return -ENXIO;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
			p->nr_targets);
		return -EINVAL;
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
	if (!dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(cxlr->dev.parent));
		return -ENXIO;
	}

	if (cxled->cxld.target_type != cxlr->type) {
		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			cxled->cxld.target_type, cxlr->type);
		return -ENXIO;
	}

	if (!cxled->dpa_res) {
		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
	    resource_size(p->res)) {
		dev_dbg(&cxlr->dev,
			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
			(u64)resource_size(p->res));
		return -EINVAL;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		int i;

		rc = cxl_region_attach_auto(cxlr, cxled, pos);
		if (rc)
			return rc;

		/* await more targets to arrive... */
		if (p->nr_targets < p->interleave_ways)
			return 0;

		/*
		 * All targets are here, which implies all PCI enumeration that
		 * affects this region has been completed. Walk the topology to
		 * sort the devices into their relative region decode position.
		 */
		rc = cxl_region_sort_targets(cxlr);
		if (rc)
			return rc;

		for (i = 0; i < p->nr_targets; i++) {
			cxled = p->targets[i];
			ep_port = cxled_to_port(cxled);
			dport = cxl_find_dport_by_dev(root_port,
						      ep_port->host_bridge);
			rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
							dport, i);
			if (rc)
				return rc;
		}

		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;

		/*
		 * If target setup succeeds in the autodiscovery case
		 * then the region is already committed.
		 */
		p->state = CXL_CONFIG_COMMIT;

		return 0;
	}

	rc = cxl_region_validate_position(cxlr, cxled, pos);
	if (rc)
		return rc;

	rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
	if (rc)
		return rc;

	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;
		p->state = CXL_CONFIG_ACTIVE;
	}

	cxled->cxld.interleave_ways = p->interleave_ways;
	cxled->cxld.interleave_granularity = p->interleave_granularity;
	cxled->cxld.hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};

	if (p->nr_targets != p->interleave_ways)
		return 0;

	/*
	 * Test the auto-discovery position calculator function
	 * against this successfully created user-defined region.
	 * A fail message here means that this interleave config
	 * will fail when presented as CXL_REGION_F_AUTO.
	 */
	for (int i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		int test_pos;

		test_pos = cxl_calc_interleave_pos(cxled);
		dev_dbg(&cxled->cxld.dev,
			"Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
			(test_pos == cxled->pos) ? "success" : "fail",
			test_pos, cxled->pos);
	}

	return 0;
}
static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
	struct cxl_region *cxlr = cxled->cxld.region;
	struct cxl_region_params *p;
	int rc = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!cxlr)
		return 0;

	p = &cxlr->params;
	get_device(&cxlr->dev);

	if (p->state > CXL_CONFIG_ACTIVE) {
		/*
		 * TODO: tear down all impacted regions if a device is
		 * removed out of order
		 */
		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
		if (rc)
			goto out;
		p->state = CXL_CONFIG_ACTIVE;
	}

	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);

	if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
	    p->targets[cxled->pos] != cxled) {
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);

		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			      cxled->pos);
		goto out;
	}

	if (p->state == CXL_CONFIG_ACTIVE) {
		p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
		cxl_region_teardown_targets(cxlr);
	}
	p->targets[cxled->pos] = NULL;
	p->nr_targets--;
	cxled->cxld.hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	/* notify the region driver that one of its targets has departed */
	up_write(&cxl_region_rwsem);
	device_release_driver(&cxlr->dev);
	down_write(&cxl_region_rwsem);
out:
	put_device(&cxlr->dev);
	return rc;
}

void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
	down_write(&cxl_region_rwsem);
	cxled->mode = CXL_DECODER_DEAD;
	cxl_region_detach(cxled);
	up_write(&cxl_region_rwsem);
}
static int attach_target(struct cxl_region *cxlr,
			 struct cxl_endpoint_decoder *cxled, int pos,
			 unsigned int state)
{
	int rc = 0;

	if (state == TASK_INTERRUPTIBLE)
		rc = down_write_killable(&cxl_region_rwsem);
	else
		down_write(&cxl_region_rwsem);
	if (rc)
		return rc;

	down_read(&cxl_dpa_rwsem);
	rc = cxl_region_attach(cxlr, cxled, pos);
	up_read(&cxl_dpa_rwsem);
	up_write(&cxl_region_rwsem);
	return rc;
}

static int detach_target(struct cxl_region *cxlr, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	if (!p->targets[pos]) {
		rc = 0;
		goto out;
	}

	rc = cxl_region_detach(p->targets[pos]);
out:
	up_write(&cxl_region_rwsem);
	return rc;
}

static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
			    size_t len)
{
	struct device *dev;
	int rc;

	if (sysfs_streq(buf, "\n"))
		rc = detach_target(cxlr, pos);
	else {
		dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
		if (!dev)
			return -ENODEV;

		if (!is_endpoint_decoder(dev)) {
			rc = -EINVAL;
			goto out;
		}

		rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
				   TASK_INTERRUPTIBLE);
out:
		put_device(dev);
	}

	if (rc < 0)
		return rc;
	return len;
}
#define TARGET_ATTR_RW(n)						\
static ssize_t target##n##_show(					\
	struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	return show_targetN(to_cxl_region(dev), buf, (n));		\
}									\
static ssize_t target##n##_store(struct device *dev,			\
				 struct device_attribute *attr,		\
				 const char *buf, size_t len)		\
{									\
	return store_targetN(to_cxl_region(dev), buf, (n), len);	\
}									\
static DEVICE_ATTR_RW(target##n)
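/*
 * Each TARGET_ATTR_RW(n) invocation stamps out a target<n>_show() /
 * target<n>_store() pair wrapping show_targetN()/store_targetN(), plus the
 * dev_attr_target<n> attribute referenced in target_attrs[] below.
 */
TARGET_ATTR_RW(0);
TARGET_ATTR_RW(1);
TARGET_ATTR_RW(2);
TARGET_ATTR_RW(3);
TARGET_ATTR_RW(4);
TARGET_ATTR_RW(5);
TARGET_ATTR_RW(6);
TARGET_ATTR_RW(7);
TARGET_ATTR_RW(8);
TARGET_ATTR_RW(9);
TARGET_ATTR_RW(10);
TARGET_ATTR_RW(11);
TARGET_ATTR_RW(12);
TARGET_ATTR_RW(13);
TARGET_ATTR_RW(14);
TARGET_ATTR_RW(15);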
static struct attribute *target_attrs[] = {
	&dev_attr_target0.attr,
	&dev_attr_target1.attr,
	&dev_attr_target2.attr,
	&dev_attr_target3.attr,
	&dev_attr_target4.attr,
	&dev_attr_target5.attr,
	&dev_attr_target6.attr,
	&dev_attr_target7.attr,
	&dev_attr_target8.attr,
	&dev_attr_target9.attr,
	&dev_attr_target10.attr,
	&dev_attr_target11.attr,
	&dev_attr_target12.attr,
	&dev_attr_target13.attr,
	&dev_attr_target14.attr,
	&dev_attr_target15.attr,
	NULL,
};

static umode_t cxl_region_target_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;

	if (n < p->interleave_ways)
		return a->mode;
	return 0;
}

static const struct attribute_group cxl_region_target_group = {
	.attrs = target_attrs,
	.is_visible = cxl_region_target_visible,
};

static const struct attribute_group *get_cxl_region_target_group(void)
{
	return &cxl_region_target_group;
}
static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};

static void cxl_region_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_region *cxlr = to_cxl_region(dev);
	int id = atomic_read(&cxlrd->region_id);

	/*
	 * Try to reuse the recently idled id rather than the cached
	 * next id to prevent the region id space from increasing
	 * unnecessarily.
	 */
	if (cxlr->id < id)
		if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
			memregion_free(id);
			goto out;
		}

	memregion_free(cxlr->id);
out:
	put_device(dev->parent);
	kfree(cxlr);
}

const struct device_type cxl_region_type = {
	.name = "cxl_region",
	.release = cxl_region_release,
	.groups = region_groups
};

bool is_cxl_region(struct device *dev)
{
	return dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);

static struct cxl_region *to_cxl_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
			  "not a cxl_region device\n"))
		return NULL;

	return container_of(dev, struct cxl_region, dev);
}
static void unregister_region(void *_cxlr)
{
	struct cxl_region *cxlr = _cxlr;
	struct cxl_region_params *p = &cxlr->params;
	int i;

	device_del(&cxlr->dev);

	/*
	 * Now that region sysfs is shutdown, the parameter block is
	 * read-only, so no need to hold the region rwsem to access the
	 * region parameters.
	 */
	for (i = 0; i < p->interleave_ways; i++)
		detach_target(cxlr, i);

	cxl_region_iomem_release(cxlr);
	put_device(&cxlr->dev);
}

static struct lock_class_key cxl_region_key;

static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
{
	struct cxl_region *cxlr;
	struct device *dev;

	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
	if (!cxlr) {
		memregion_free(id);
		return ERR_PTR(-ENOMEM);
	}

	dev = &cxlr->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_region_key);
	dev->parent = &cxlrd->cxlsd.cxld.dev;
	/*
	 * Keep root decoder pinned through cxl_region_release to fixup
	 * region id allocations
	 */
	get_device(dev->parent);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_region_type;
	cxlr->id = id;

	return cxlr;
}
/**
 * devm_cxl_add_region - Adds a region to a decoder
 * @cxlrd: root decoder
 * @id: memregion id to create, or memregion_free() on failure
 * @mode: mode for the endpoint decoders of this region
 * @type: select whether this is an expander or accelerator (type-2 or type-3)
 *
 * This is the second step of region initialization. Regions exist within an
 * address space which is mapped by a @cxlrd.
 *
 * Return: 0 if the region was added to the @cxlrd, else returns negative error
 * code. The region will be named "regionZ" where Z is the unique region number.
 */
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
					      int id,
					      enum cxl_decoder_mode mode,
					      enum cxl_decoder_type type)
{
	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
	struct cxl_region *cxlr;
	struct device *dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
		return ERR_PTR(-EINVAL);
	}

	cxlr = cxl_region_alloc(cxlrd, id);
	if (IS_ERR(cxlr))
		return cxlr;
	cxlr->mode = mode;
	cxlr->type = type;

	dev = &cxlr->dev;
	rc = dev_set_name(dev, "region%d", id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
	if (rc)
		return ERR_PTR(rc);

	dev_dbg(port->uport_dev, "%s: created %s\n",
		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));

	return cxlr;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
{
	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
}

static ssize_t create_pmem_region_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static ssize_t create_ram_region_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
					  enum cxl_decoder_mode mode, int id)
{
	int rc;

	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0)
		return ERR_PTR(rc);

	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		memregion_free(rc);
		return ERR_PTR(-EBUSY);
	}

	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
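/*
 * Editorial note on __create_region(): the atomic_cmpxchg() only consumes
 * @id if it is still the cached next-available id, so a racing write of a
 * stale region name fails with -EBUSY rather than creating a misnamed
 * region.
 */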
static ssize_t create_pmem_region_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_pmem_region);

static ssize_t create_ram_region_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_ram_region);

static ssize_t region_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxld->region)
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
	else
		rc = sysfs_emit(buf, "\n");
	up_read(&cxl_region_rwsem);

	return rc;
}
DEVICE_ATTR_RO(region);
static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}

static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	devm_release_action(port->uport_dev, unregister_region, cxlr);
	put_device(&cxlr->dev);

	return len;
}
DEVICE_ATTR_WO(delete_region);
static void cxl_pmem_region_release(struct device *dev)
{
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	int i;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;

		put_device(&cxlmd->dev);
	}

	kfree(cxlr_pmem);
}

static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};

bool is_cxl_pmem_region(struct device *dev)
{
	return dev->type == &cxl_pmem_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);

struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
			  "not a cxl_pmem_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_pmem_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
struct cxl_poison_context {
	struct cxl_port *port;
	enum cxl_decoder_mode mode;
	u64 offset;
};

static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
				   struct cxl_poison_context *ctx)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/*
	 * Collect poison for the remaining unmapped resources
	 * after poison is collected by committed endpoints.
	 *
	 * Knowing that PMEM must always follow RAM, get poison
	 * for unmapped resources based on the last decoder's mode:
	 *	ram: scan remains of ram range, then any pmem range
	 *	pmem: scan remains of pmem range
	 */

	if (ctx->mode == CXL_DECODER_RAM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->ram_res) - offset;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT)
			rc = 0;
		if (rc)
			return rc;
	}
	if (ctx->mode == CXL_DECODER_PMEM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->dpa_res) - offset;
		if (!length)
			return 0;
	} else if (resource_size(&cxlds->pmem_res)) {
		offset = cxlds->pmem_res.start;
		length = resource_size(&cxlds->pmem_res);
	} else {
		return 0;
	}

	return cxl_mem_get_poison(cxlmd, offset, length, NULL);
}
static int poison_by_decoder(struct device *dev, void *arg)
{
	struct cxl_poison_context *ctx = arg;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_memdev *cxlmd;
	u64 offset, length;
	int rc = 0;

	if (!is_endpoint_decoder(dev))
		return rc;

	cxled = to_cxl_endpoint_decoder(dev);
	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
		return rc;

	/*
	 * Regions are only created with single mode decoders: pmem or ram.
	 * Linux does not support mixed mode decoders. This means that
	 * reading poison per endpoint decoder adheres to the requirement
	 * that poison reads of pmem and ram must be separated.
	 * CXL 3.0 Spec 8.2.9.8.4.1
	 */
	if (cxled->mode == CXL_DECODER_MIXED) {
		dev_dbg(dev, "poison list read unsupported in mixed mode\n");
		return rc;
	}

	cxlmd = cxled_to_memdev(cxled);
	if (cxled->skip) {
		offset = cxled->dpa_res->start - cxled->skip;
		length = cxled->skip;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
			rc = 0;
		if (rc)
			return rc;
	}

	offset = cxled->dpa_res->start;
	length = cxled->dpa_res->end - offset + 1;
	rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
	if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
		rc = 0;
	if (rc)
		return rc;

	/* Iterate until commit_end is reached */
	if (cxled->cxld.id == ctx->port->commit_end) {
		ctx->offset = cxled->dpa_res->end + 1;
		ctx->mode = cxled->mode;
		return 1;
	}

	return 0;
}

int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	struct cxl_poison_context ctx;
	int rc = 0;

	ctx = (struct cxl_poison_context) {
		.port = port
	};

	rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
	if (rc == 1)
		rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
					     &ctx);

	return rc;
}
2513 static struct lock_class_key cxl_pmem_region_key;
2515 static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
2517 struct cxl_region_params *p = &cxlr->params;
2518 struct cxl_nvdimm_bridge *cxl_nvb;
2519 struct cxl_pmem_region *cxlr_pmem;
2523 down_read(&cxl_region_rwsem);
2524 if (p->state != CXL_CONFIG_COMMIT) {
2525 cxlr_pmem = ERR_PTR(-ENXIO);
2529 cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
2532 cxlr_pmem = ERR_PTR(-ENOMEM);
2536 cxlr_pmem->hpa_range.start = p->res->start;
2537 cxlr_pmem->hpa_range.end = p->res->end;
2539 /* Snapshot the region configuration underneath the cxl_region_rwsem */
2540 cxlr_pmem->nr_mappings = p->nr_targets;
2541 for (i = 0; i < p->nr_targets; i++) {
2542 struct cxl_endpoint_decoder *cxled = p->targets[i];
2543 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2544 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
2547 * Regions never span CXL root devices, so by definition the
2548 * bridge for one device is the same for all.
		if (i == 0) {
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
			if (!cxl_nvb) {
				/* free the snapshot, it is not returned on error */
				kfree(cxlr_pmem);
				cxlr_pmem = ERR_PTR(-ENODEV);
				goto out;
			}
			cxlr->cxl_nvb = cxl_nvb;
		}
		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}
	dev = &cxlr_pmem->dev;
	cxlr_pmem->cxlr = cxlr;
	cxlr->cxlr_pmem = cxlr_pmem;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_pmem;
}
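
/*
 * Note on the snapshot allocation above: struct_size(cxlr_pmem, mapping,
 * p->nr_targets) is the overflow-checked form of sizeof(*cxlr_pmem) +
 * p->nr_targets * sizeof(cxlr_pmem->mapping[0]), sizing the struct and
 * its trailing flexible array of mappings in a single allocation.
 */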

static void cxl_dax_region_release(struct device *dev)
{
	struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);

	kfree(cxlr_dax);
}

static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_dax_region_type = {
	.name = "cxl_dax_region",
	.release = cxl_dax_region_release,
	.groups = cxl_dax_region_attribute_groups,
};

static bool is_cxl_dax_region(struct device *dev)
{
	return dev->type == &cxl_dax_region_type;
}

struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
			  "not a cxl_dax_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_dax_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
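
/*
 * Consumer sketch (hypothetical names, modeled on how a dax driver probe
 * might use the export; not part of this file):
 *
 *	static int example_dax_region_probe(struct device *dev)
 *	{
 *		struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
 *
 *		if (!cxlr_dax)
 *			return -ENXIO;
 *		// operate on cxlr_dax->hpa_range ...
 *		return 0;
 *	}
 */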

static struct lock_class_key cxl_dax_region_key;

static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_dax = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
	if (!cxlr_dax) {
		cxlr_dax = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_dax->hpa_range.start = p->res->start;
	cxlr_dax->hpa_range.end = p->res->end;

	dev = &cxlr_dax->dev;
	cxlr_dax->cxlr = cxlr;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_dax_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_dax;
}

static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
	struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	/*
	 * Either the bridge is in ->remove() context under the device_lock(),
	 * or cxlr_release_nvdimm() is cancelling the bridge's release action
	 * for @cxlr_pmem and doing it itself (while manually holding the bridge
	 * device_lock).
	 */
	device_lock_assert(&cxl_nvb->dev);
	cxlr->cxlr_pmem = NULL;
	cxlr_pmem->cxlr = NULL;
	device_unregister(&cxlr_pmem->dev);
}

static void cxlr_release_nvdimm(void *_cxlr)
{
	struct cxl_region *cxlr = _cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (cxlr->cxlr_pmem)
		devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
				    cxlr->cxlr_pmem);
	device_unlock(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	put_device(&cxl_nvb->dev);
}

/**
 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
 * @cxlr: parent CXL region for this pmem region bridge device
 *
 * Return: 0 on success, negative error code on failure.
 */
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
	if (IS_ERR(cxlr_pmem))
		return PTR_ERR(cxlr_pmem);
	cxl_nvb = cxlr->cxl_nvb;

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	device_lock(&cxl_nvb->dev);
	if (cxl_nvb->dev.driver)
		rc = devm_add_action_or_reset(&cxl_nvb->dev,
					      cxlr_pmem_unregister, cxlr_pmem);
	else
		rc = -ENXIO;
	device_unlock(&cxl_nvb->dev);

	if (rc)
		goto err_bridge;

	/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
	return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);

err:
	put_device(dev);
err_bridge:
	put_device(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	return rc;
}

static void cxlr_dax_unregister(void *_cxlr_dax)
{
	struct cxl_dax_region *cxlr_dax = _cxlr_dax;

	device_unregister(&cxlr_dax->dev);
}

static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
{
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;
	int rc;

	cxlr_dax = cxl_dax_region_alloc(cxlr);
	if (IS_ERR(cxlr_dax))
		return PTR_ERR(cxlr_dax);

	dev = &cxlr_dax->dev;
	rc = dev_set_name(dev, "dax_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
					cxlr_dax);
err:
	put_device(dev);
	return rc;
}
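
/*
 * Resulting topology sketch (illustrative ids): for a ram-mode region0
 * this adds a "dax_region0" child device on the CXL bus, parented to the
 * region, e.g. /sys/bus/cxl/devices/region0/dax_region0, for the dax
 * layer to bind.
 */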

static int match_root_decoder_by_range(struct device *dev, void *data)
{
	struct range *r1, *r2 = data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	r1 = &cxlrd->cxlsd.cxld.hpa_range;
	return range_contains(r1, r2);
}

static int match_region_by_range(struct device *dev, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct range *r = data;
	int rc = 0;

	if (!is_cxl_region(dev))
		return 0;

	cxlr = to_cxl_region(dev);
	p = &cxlr->params;

	down_read(&cxl_region_rwsem);
	if (p->res && p->res->start == r->start && p->res->end == r->end)
		rc = 1;
	up_read(&cxl_region_rwsem);

	return rc;
}

/* Establish an empty region covering the given HPA range */
static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
					   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxlrd_to_port(cxlrd);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct resource *res;
	int rc;

	/* Retry if a racing region creation consumed the current region id */
	do {
		cxlr = __create_region(cxlrd, cxled->mode,
				       atomic_read(&cxlrd->region_id));
	} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);

	if (IS_ERR(cxlr)) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s failed assign region: %ld\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__, PTR_ERR(cxlr));
		return cxlr;
	}

	down_write(&cxl_region_rwsem);
	p = &cxlr->params;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s autodiscovery interrupted\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__);
		rc = -EBUSY;
		goto err;
	}

	set_bit(CXL_REGION_F_AUTO, &cxlr->flags);

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto err;
	}

	*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
				    dev_name(&cxlr->dev));
	rc = insert_resource(cxlrd->res, res);
	if (rc) {
		/*
		 * Platform-firmware may not have split resources like "System
		 * RAM" on CXL window boundaries, see cxl_region_iomem_release()
		 */
		dev_warn(cxlmd->dev.parent,
			 "%s:%s: %s %s cannot insert resource\n",
			 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			 __func__, dev_name(&cxlr->dev));
	}

	p->res = res;
	p->interleave_ways = cxled->cxld.interleave_ways;
	p->interleave_granularity = cxled->cxld.interleave_granularity;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		goto err;

	dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
		dev_name(&cxlr->dev), p->res, p->interleave_ways,
		p->interleave_granularity);

	/* ...to match put_device() in cxl_add_to_region() */
	get_device(&cxlr->dev);
	up_write(&cxl_region_rwsem);

	return cxlr;

err:
	up_write(&cxl_region_rwsem);
	devm_release_action(port->uport_dev, unregister_region, cxlr);
	return ERR_PTR(rc);
}

int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_decoder *cxld = &cxled->cxld;
	struct device *cxlrd_dev, *region_dev;
	struct cxl_root_decoder *cxlrd;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	bool attach = false;
	int rc;

	cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
				      match_root_decoder_by_range);
	if (!cxlrd_dev) {
		dev_err(cxlmd->dev.parent,
			"%s:%s no CXL window for range %#llx:%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxld->dev),
			cxld->hpa_range.start, cxld->hpa_range.end);
		return -ENXIO;
	}

	cxlrd = to_cxl_root_decoder(cxlrd_dev);

	/*
	 * Ensure that if multiple threads race to construct_region() for @hpa
	 * one does the construction and the others add to that.
	 */
	mutex_lock(&cxlrd->range_lock);
	region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
				       match_region_by_range);
	if (!region_dev) {
		cxlr = construct_region(cxlrd, cxled);
		region_dev = &cxlr->dev;
	} else
		cxlr = to_cxl_region(region_dev);
	mutex_unlock(&cxlrd->range_lock);

	rc = PTR_ERR_OR_ZERO(cxlr);
	if (rc)
		goto out;

	attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);

	down_read(&cxl_region_rwsem);
	p = &cxlr->params;
	attach = p->state == CXL_CONFIG_COMMIT;
	up_read(&cxl_region_rwsem);

	if (attach) {
		/*
		 * If device_attach() fails the range may still be active via
		 * the platform-firmware memory map, otherwise the driver for
		 * regions is local to this file, so driver matching can't fail.
		 */
		if (device_attach(&cxlr->dev) < 0)
			dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
				p->res);
	}

	put_device(region_dev);
out:
	put_device(cxlrd_dev);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
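
/*
 * Caller sketch (an assumption modeled on endpoint-port autodiscovery;
 * identifiers are illustrative, not defined here): walk a port's decoders
 * and opportunistically add each endpoint decoder to its region:
 *
 *	static int example_discover(struct device *dev, void *root)
 *	{
 *		struct cxl_endpoint_decoder *cxled;
 *
 *		if (!is_endpoint_decoder(dev))
 *			return 0;
 *		cxled = to_cxl_endpoint_decoder(dev);
 *		if (cxl_add_to_region(root, cxled))
 *			dev_dbg(dev, "failed to add to region\n");
 *		return 0;
 *	}
 */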

static int is_system_ram(struct resource *res, void *arg)
{
	struct cxl_region *cxlr = arg;
	struct cxl_region_params *p = &cxlr->params;

	dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
	return 1;
}

static int cxl_region_probe(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc) {
		dev_dbg(&cxlr->dev, "probe interrupted\n");
		return rc;
	}

	if (p->state < CXL_CONFIG_COMMIT) {
		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
		rc = -ENXIO;
		goto out;
	}

	if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
		dev_err(&cxlr->dev,
			"failed to activate, re-commit region and retry\n");
		rc = -ENXIO;
		goto out;
	}

	/*
	 * From this point on any path that changes the region's state away from
	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
	 */
out:
	up_read(&cxl_region_rwsem);

	if (rc)
		return rc;

	switch (cxlr->mode) {
	case CXL_DECODER_PMEM:
		return devm_cxl_add_pmem_region(cxlr);
	case CXL_DECODER_RAM:
		/*
		 * The region cannot be managed by CXL if any portion of
		 * it is already online as 'System RAM'
		 */
		if (walk_iomem_res_desc(IORES_DESC_NONE,
					IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					p->res->start, p->res->end, cxlr,
					is_system_ram) > 0)
			return 0;
		return devm_cxl_add_dax_region(cxlr);
	default:
		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
			cxlr->mode);
		return -ENXIO;
	}
}

static struct cxl_driver cxl_region_driver = {
	.name = "cxl_region",
	.probe = cxl_region_probe,
	.id = CXL_DEVICE_REGION,
};

int cxl_region_init(void)
{
	return cxl_driver_register(&cxl_region_driver);
}

void cxl_region_exit(void)
{
	cxl_driver_unregister(&cxl_region_driver);
}

MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(DEVMEM);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);