// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}
/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	/* grab the first (and only) dport in the xarray */
	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
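/*
 * Usage sketch (a simplified paraphrase of the cxl_port driver, for
 * illustration only): a switch port falls back to a passthrough decoder
 * when no HDM decoder capability is published and the port has exactly
 * one dport. The helper name and @nr_dports parameter are hypothetical.
 */
static int __maybe_unused example_switch_port_setup(struct cxl_port *port,
						    int nr_dports)
{
	struct cxl_hdm *cxlhdm = devm_cxl_setup_hdm(port, NULL);

	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	/* -ENODEV from setup means "no HDM decoder capability found" */
	if (PTR_ERR(cxlhdm) == -ENODEV && nr_dports == 1)
		return devm_cxl_add_passthrough_decoder(port);

	return PTR_ERR(cxlhdm);
}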
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}
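/*
 * Worked example (hypothetical register value): a capability register
 * reading 0x145 decodes as an encoded decoder count of 5 (which
 * cxl_hdm_decoder_count() maps to 10 decoder instances), a target count
 * of 4, and support for address bits 11:8 in the interleave math, i.e.
 * interleave_mask = GENMASK(11, 8).
 */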
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
				struct cxl_component_regs *regs)
{
	struct cxl_register_map map = {
		.resource = port->component_reg_phys,
		.base = crb,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
	if (!map.component_map.hdm_decoder.valid) {
		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
		/* unique error code to indicate no HDM decoder capability */
		return -ENODEV;
	}

	return cxl_map_component_regs(&port->dev, regs, &map,
				      BIT(CXL_CM_CAP_CAP_ID_HDM));
}
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;
	int i;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC-based emulation.
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		dev_dbg(&info->port->dev,
			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
			info->port->id, i,
			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}
/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	void __iomem *crb;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb && info && info->mem_enabled) {
		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	} else if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
	iounmap(crb);
	if (rc)
		return ERR_PTR(rc);

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			info->ranges > 1 ? "s" : "");
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}
void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
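/*
 * Example cxl_dpa_debug() output (hypothetical device with a 1GB ram and
 * 1GB pmem partition and one allocation in each), in /proc/iomem style;
 * the decoder names and resource layout here are illustrative only:
 *
 *   00000000-3fffffff : ram
 *     00000000-0fffffff : decoder5.0
 *   40000000-7fffffff : pmem
 *     40000000-4fffffff : decoder5.1
 */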
/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}
static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}
/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len) {
		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
			 port->id, cxled->cxld.id);
		return -EINVAL;
	}

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
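/*
 * Usage sketch (hypothetical caller, not part of this file): restore a
 * 256MB allocation at DPA offset 256MB on the next free decoder. The
 * @skipped argument records the 256MB of unallocated DPA below @base so
 * that a later decoder cannot double-claim it. Assumes SZ_256M from
 * <linux/sizes.h>.
 */
static int __maybe_unused example_dpa_reserve(struct cxl_endpoint_decoder *cxled)
{
	/* base = 256MB, len = 256MB, skipped = the 256MB below base */
	return devm_cxl_dpa_reserve(cxled, SZ_256M, SZ_256M, SZ_256M);
}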
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
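/*
 * Worked example of the skip math above (hypothetical layout): with
 * ram_res = 0x0-0x3fffffff, pmem_res = 0x40000000-0x7fffffff, and no
 * allocations yet, a pmem allocation starts at free_pmem_start =
 * 0x40000000 and must skip the untouched ram partition: skip_start =
 * 0x0, skip_end = 0x3fffffff, skip = 1GB. Once a pmem child allocation
 * already exists at that spot, the earlier reservation has claimed the
 * skip, and skip computes to 0.
 */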
static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}
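/*
 * Encoding example: a 4-way interleave at 256-byte granularity programs
 * eiw = 2 (2^2 = 4 ways) and eig = 0 (256 << 0 = 256 bytes) into CTRL
 * before setting the COMMIT bit.
 */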
static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl, !!(cxld->target_type == CXL_DECODER_EXPANDER),
			  CXL_HDM_DECODER0_CTRL_TYPE);
}
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
			  ways > 8 || ways > cxlsd->nr_targets,
			  "ways: %d overflows targets: %d\n", ways,
			  cxlsd->nr_targets))
		return -ENXIO;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

	return 0;
}
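/*
 * Packing example (hypothetical port_ids): a 2-way switch decoder with
 * targets { port_id 2, port_id 5 } yields *tgt = 0x0502, i.e. byte N of
 * the target list register selects the dport for interleave position N.
 */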
/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		rc = cxlsd_set_targets(cxlsd, &targets);
		if (rc) {
			dev_dbg(&port->dev, "%s: target configuration error\n",
				dev_name(&cxld->dev));
			/* drop the lock before taking the common error exit */
			up_read(&cxl_dpa_rwsem);
			goto err;
		}

		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
err:
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}
static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}

	return 0;
}
static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	u64 size, base, skip, dpa_size, lo, hi;
	struct cxl_endpoint_decoder *cxled;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
	base = (hi << 32) + lo;
	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
	size = (hi << 32) + lo;
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
			cxld->target_type = CXL_DECODER_EXPANDER;
		else
			cxld->target_type = CXL_DECODER_ACCELERATOR;
		if (cxld->id != port->commit_end + 1) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}

		if (size == 0) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed with zero size\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		/* unless / until type-2 drivers arrive, assume type-3 */
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
		cxld->target_type = CXL_DECODER_EXPANDER;
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
		cxld->interleave_ways, cxld->interleave_granularity);

	if (!info) {
		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
		target_list.value = (hi << 32) + lo;
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}

	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
	skip = (hi << 32) + lo;
	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}
static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region()
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}
/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);