// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"
/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */
DECLARE_RWSEM(cxl_dpa_rwsem);
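/*
 * cxl_dpa_rwsem protects the device-physical-address (DPA) resource tree
 * rooted at cxl_dev_state.dpa_res and the per-decoder allocations hanging
 * off of it (cxled->dpa_res, cxled->skip): readers walk the tree, writers
 * reserve and release ranges.
 */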
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}
/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}
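/*
 * The capability header packs several facts read above: the decoder
 * count (an encoded field, not a raw integer), the per-decoder target
 * count, and which host-physical-address bits the port may use for
 * interleave selection (A[11:8] and/or A[14:12]); interleave_mask
 * accumulates the supported address bits.
 */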
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
				struct cxl_component_regs *regs)
{
	struct cxl_register_map map = {
		.resource = port->component_reg_phys,
		.base = crb,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
	if (!map.component_map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return -ENXIO;
	}

	return cxl_map_component_regs(&port->dev, regs, &map,
				      BIT(CXL_CM_CAP_CAP_ID_HDM));
}
static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
						   struct cxl_endpoint_dvsec_info *info)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;

	if (!info->mem_enabled)
		return ERR_PTR(-ENODEV);

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	cxlhdm->decoder_count = info->ranges;
	cxlhdm->target_count = info->ranges;
	dev_set_drvdata(&port->dev, cxlhdm);

	return cxlhdm;
}
/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	void __iomem *crb;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		if (info->mem_enabled)
			return devm_cxl_setup_emulated_hdm(port, info);

		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
	iounmap(crb);
	if (rc)
		return ERR_PTR(rc);

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	dev_set_drvdata(dev, cxlhdm);

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
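/*
 * Illustrative call sequence (a sketch, not taken from this file): a port
 * driver's probe path is expected to pair the two entry points, e.g.:
 *
 *	cxlhdm = devm_cxl_setup_hdm(port, info);
 *	if (IS_ERR(cxlhdm))
 *		return PTR_ERR(cxlhdm);
 *	return devm_cxl_enumerate_decoders(cxlhdm, info);
 */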
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
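/*
 * The resulting listing mirrors /proc/iomem, two levels deep: partitions
 * at depth 0 and decoder allocations indented beneath them. Hypothetical
 * example output:
 *
 *	00000000-3fffffff : ram
 *	  00000000-0fffffff : decoder3.0
 *	40000000-7fffffff : pmem
 */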
/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}
static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}
/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len)
		goto success;

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

success:
	port->hdm_end++;
	get_device(&cxled->cxld.dev);

	return 0;
}
static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
				resource_size_t base, resource_size_t len,
				resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
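/*
 * Worked example of the pmem skip math above (hypothetical numbers): with
 * ram_res 0x0-0xfffffff fully free and pmem_res starting at 0x10000000, a
 * pmem allocation must "skip" the 256MB of free ram so that a later ram
 * allocation can still land below it. Here skip_start == free_ram_start ==
 * 0x0 and skip_end == start - 1 == 0xfffffff, so 0x10000000 bytes are
 * reserved but not mapped by this decoder.
 */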
static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}
static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl, !!(cxld->target_type == CXL_DECODER_EXPANDER),
			  CXL_HDM_DECODER0_CTRL_TYPE);
}
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
			  ways > 8 || ways > cxlsd->nr_targets,
			  "ways: %d overflows targets: %d\n", ways,
			  cxlsd->nr_targets))
		return -ENXIO;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

	return 0;
}
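/*
 * The 64-bit target list register built above is an array of eight 8-bit
 * dport ids, byte N selecting the target for interleave position N. E.g. a
 * 2-way decoder routing to dports 3 and 7 (hypothetical ids) encodes as
 * 0x0703.
 */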
/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		rc = cxlsd_set_targets(cxlsd, &targets);
		if (rc) {
			dev_dbg(&port->dev, "%s: target configuration error\n",
				dev_name(&cxld->dev));
			up_read(&cxl_dpa_rwsem);
			goto err;
		}

		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
err:
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}
static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	return 0;
}
static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
					    struct cxl_decoder *cxld, int which,
					    struct cxl_endpoint_dvsec_info *info)
{
	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	if (!range_len(&info->dvsec_range[which]))
		return -ENOENT;

	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (is_endpoint_decoder(&cxld->dev))
		cxled = to_cxl_endpoint_decoder(&cxld->dev);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	if (cxled && !committed && range_len(&info->dvsec_range[which]))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
			cxld->target_type = CXL_DECODER_EXPANDER;
		else
			cxld->target_type = CXL_DECODER_ACCELERATOR;
		if (cxld->id != port->commit_end + 1) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		/* unless / until type-2 drivers arrive, assume type-3 */
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
		cxld->target_type = CXL_DECODER_EXPANDER;
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	if (!cxled) {
		target_list.value =
			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}

	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;
	return 0;
}
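/*
 * Note the DPA accounting above: each committed endpoint decoder consumes
 * device capacity in hardware-instance order, so *dpa_base advances by
 * (size / interleave_ways) plus any skip. E.g. (hypothetical) decoder0
 * with a 2GB 1-way mapping and no skip leaves *dpa_base at 2GB for
 * decoder1.
 */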
static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region()
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}
/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder to port\n");
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);