cxl/hdm: Create emulated cxl_hdm for devices that do not have HDM decoders
drivers/cxl/core/hdm.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                           int *target_map)
{
        int rc;

        rc = cxl_decoder_add_locked(cxld, target_map);
        if (rc) {
                put_device(&cxld->dev);
                dev_err(&port->dev, "Failed to add decoder\n");
                return rc;
        }

        rc = cxl_decoder_autoremove(&port->dev, cxld);
        if (rc)
                return rc;

        dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

        return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
        struct cxl_switch_decoder *cxlsd;
        struct cxl_dport *dport = NULL;
        int single_port_map[1];
        unsigned long index;

        cxlsd = cxl_switch_decoder_alloc(port, 1);
        if (IS_ERR(cxlsd))
                return PTR_ERR(cxlsd);

        device_lock_assert(&port->dev);

        xa_for_each(&port->dports, index, dport)
                break;
        single_port_map[0] = dport->port_id;

        return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

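/*
 * Cache the decoder count, target count, and which address bits (11:8,
 * 14:12) the decoders can use for interleave, as advertised by the HDM
 * Decoder Capability register.
 */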
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
        u32 hdm_cap;

        hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
        cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
        cxlhdm->target_count =
                FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(11, 8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(14, 12);
}

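/*
 * Probe the component register block and map just the HDM decoder
 * capability that this file needs.
 */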
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
                                struct cxl_component_regs *regs)
{
        struct cxl_register_map map = {
                .resource = port->component_reg_phys,
                .base = crb,
                .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
        };

        cxl_probe_component_regs(&port->dev, crb, &map.component_map);
        if (!map.component_map.hdm_decoder.valid) {
                dev_err(&port->dev, "HDM decoder registers invalid\n");
                return -ENXIO;
        }

        return cxl_map_component_regs(&port->dev, regs, &map,
                                      BIT(CXL_CM_CAP_CAP_ID_HDM));
}

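/*
 * For devices that decode memory via the DVSEC range registers rather than
 * an HDM decoder capability, construct a software-only (emulated) cxl_hdm so
 * that port enumeration can proceed. Only attempted when the DVSEC reports
 * that memory is enabled.
 */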
static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
                                                   struct cxl_endpoint_dvsec_info *info)
{
        struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;

        if (!info->mem_enabled)
                return ERR_PTR(-ENODEV);

        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);

        cxlhdm->port = port;
        cxlhdm->decoder_count = info->ranges;
        cxlhdm->target_count = info->ranges;
        dev_set_drvdata(&port->dev, cxlhdm);

        return cxlhdm;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                   struct cxl_endpoint_dvsec_info *info)
{
        struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;
        void __iomem *crb;
        int rc;

        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);

        cxlhdm->port = port;
        crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
        if (!crb) {
                if (info->mem_enabled)
                        return devm_cxl_setup_emulated_hdm(port, info);

                dev_err(dev, "No component registers mapped\n");
                return ERR_PTR(-ENXIO);
        }

        rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
        iounmap(crb);
        if (rc)
                return ERR_PTR(rc);

        parse_hdm_decoder_caps(cxlhdm);
        if (cxlhdm->decoder_count == 0) {
                dev_err(dev, "Spec violation. Caps invalid\n");
                return ERR_PTR(-ENXIO);
        }

        dev_set_drvdata(dev, cxlhdm);

        return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
        unsigned long long start = r->start, end = r->end;

        seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
                   r->name);
}

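/*
 * Dump the device's DPA resource tree, two levels deep: the ram/pmem
 * partitions and the per-decoder allocations within them.
 */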
void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
        struct resource *p1, *p2;

        down_read(&cxl_dpa_rwsem);
        for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
                __cxl_dpa_debug(file, p1, 0);
                for (p2 = p1->child; p2; p2 = p2->sibling)
                        __cxl_dpa_debug(file, p2, 1);
        }
        up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct resource *res = cxled->dpa_res;
        resource_size_t skip_start;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        /* save @skip_start, before @res is released */
        skip_start = res->start - cxled->skip;
        __release_region(&cxlds->dpa_res, res->start, resource_size(res));
        if (cxled->skip)
                __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
        cxled->skip = 0;
        cxled->dpa_res = NULL;
        put_device(&cxled->cxld.dev);
        port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
        down_write(&cxl_dpa_rwsem);
        __cxl_dpa_release(cxled);
        up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);

        lockdep_assert_held_write(&cxl_dpa_rwsem);
        devm_remove_action(&port->dev, cxl_dpa_release, cxled);
        __cxl_dpa_release(cxled);
}

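/*
 * Reserve @len bytes of device physical address space at @base for @cxled,
 * optionally claiming @skipped bytes of unused capacity immediately below
 * @base so that decoder DPA ranges stay contiguous in hardware instance
 * order. Takes a reference on the decoder that is dropped on release.
 */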
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                             resource_size_t base, resource_size_t len,
                             resource_size_t skipped)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &port->dev;
        struct resource *res;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        if (!len)
                goto success;

        if (cxled->dpa_res) {
                dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
                        port->id, cxled->cxld.id, cxled->dpa_res);
                return -EBUSY;
        }

        if (port->hdm_end + 1 != cxled->cxld.id) {
                /*
                 * Assumes alloc and commit order is always in hardware
                 * instance order, per the expectations of 8.2.5.12.20
                 * Committing Decoder Programming, which requires decoder[m]
                 * to be committed before decoder[m+1] commit starts.
                 */
                dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
                        cxled->cxld.id, port->id, port->hdm_end + 1);
                return -EBUSY;
        }

        if (skipped) {
                res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
                                       dev_name(&cxled->cxld.dev), 0);
                if (!res) {
                        dev_dbg(dev,
                                "decoder%d.%d: failed to reserve skipped space\n",
                                port->id, cxled->cxld.id);
                        return -EBUSY;
                }
        }
        res = __request_region(&cxlds->dpa_res, base, len,
                               dev_name(&cxled->cxld.dev), 0);
        if (!res) {
                dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
                        port->id, cxled->cxld.id);
                if (skipped)
                        __release_region(&cxlds->dpa_res, base - skipped,
                                         skipped);
                return -EBUSY;
        }
        cxled->dpa_res = res;
        cxled->skip = skipped;

        if (resource_contains(&cxlds->pmem_res, res))
                cxled->mode = CXL_DECODER_PMEM;
        else if (resource_contains(&cxlds->ram_res, res))
                cxled->mode = CXL_DECODER_RAM;
        else {
                dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
                        cxled->cxld.id, cxled->dpa_res);
                cxled->mode = CXL_DECODER_MIXED;
        }

success:
        port->hdm_end++;
        get_device(&cxled->cxld.dev);
        return 0;
}

static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                                resource_size_t base, resource_size_t len,
                                resource_size_t skipped)
{
        struct cxl_port *port = cxled_to_port(cxled);
        int rc;

        down_write(&cxl_dpa_rwsem);
        rc = __cxl_dpa_reserve(cxled, base, len, skipped);
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

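/* Locked read of the decoder's current DPA allocation size (0 if none) */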
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t size = 0;

        down_read(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                size = resource_size(cxled->dpa_res);
        up_read(&cxl_dpa_rwsem);

        return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t base = -1;

        down_read(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                base = cxled->dpa_res->start;
        up_read(&cxl_dpa_rwsem);

        return base;
}

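/*
 * Free a decoder's DPA allocation from sysfs context. Only the most
 * recently allocated decoder (port->hdm_end) can be freed, and only while
 * it is neither enabled nor assigned to a region.
 */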
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);
        struct device *dev = &cxled->cxld.dev;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (!cxled->dpa_res) {
                rc = 0;
                goto out;
        }
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder assigned to: %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.id != port->hdm_end) {
                dev_dbg(dev, "expected decoder%d.%d\n", port->id,
                        port->hdm_end);
                rc = -EBUSY;
                goto out;
        }
        devm_cxl_dpa_release(cxled);
        rc = 0;
out:
        up_write(&cxl_dpa_rwsem);
        return rc;
}

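/*
 * Set the decoder's target partition (ram or pmem) ahead of a DPA
 * allocation, rejecting modes that the current partition layout cannot
 * back with capacity.
 */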
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
                     enum cxl_decoder_mode mode)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        int rc;

        switch (mode) {
        case CXL_DECODER_RAM:
        case CXL_DECODER_PMEM:
                break;
        default:
                dev_dbg(dev, "unsupported mode: %d\n", mode);
                return -EINVAL;
        }

        down_write(&cxl_dpa_rwsem);
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                rc = -EBUSY;
                goto out;
        }

        /*
         * Only allow modes that are supported by the current partition
         * configuration
         */
        if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
                dev_dbg(dev, "no available pmem capacity\n");
                rc = -ENXIO;
                goto out;
        }
        if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
                dev_dbg(dev, "no available ram capacity\n");
                rc = -ENXIO;
                goto out;
        }

        cxled->mode = mode;
        rc = 0;
out:
        up_write(&cxl_dpa_rwsem);

        return rc;
}

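/*
 * Allocate @size bytes of DPA for @cxled. Allocations grow upward from the
 * end of the last existing allocation in the selected partition, and a pmem
 * allocation may need to "skip" any still-unallocated ram capacity so that
 * decoder DPA ranges remain ordered by hardware instance.
 */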
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        resource_size_t free_ram_start, free_pmem_start;
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        resource_size_t start, avail, skip;
        struct resource *p, *last;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder attached to %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }

        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }

        for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_ram_start = last->end + 1;
        else
                free_ram_start = cxlds->ram_res.start;

        for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_pmem_start = last->end + 1;
        else
                free_pmem_start = cxlds->pmem_res.start;

        if (cxled->mode == CXL_DECODER_RAM) {
                start = free_ram_start;
                avail = cxlds->ram_res.end - start + 1;
                skip = 0;
        } else if (cxled->mode == CXL_DECODER_PMEM) {
                resource_size_t skip_start, skip_end;

                start = free_pmem_start;
                avail = cxlds->pmem_res.end - start + 1;
                skip_start = free_ram_start;

                /*
                 * If some pmem is already allocated, then that allocation
                 * already handled the skip.
                 */
                if (cxlds->pmem_res.child &&
                    skip_start == cxlds->pmem_res.child->start)
                        skip_end = skip_start - 1;
                else
                        skip_end = start - 1;
                skip = skip_end - skip_start + 1;
        } else {
                dev_dbg(dev, "mode not set\n");
                rc = -EINVAL;
                goto out;
        }

        if (size > avail) {
                dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
                        cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
                        &avail);
                rc = -ENOSPC;
                goto out;
        }

        rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
        u16 eig;
        u8 eiw;

        /*
         * Input validation ensures these warns never fire, but otherwise
         * suppress uninitialized variable usage warnings.
         */
        if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
                      "invalid interleave_ways: %d\n", cxld->interleave_ways))
                return;
        if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
                      "invalid interleave_granularity: %d\n",
                      cxld->interleave_granularity))
                return;

        u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
        u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
        *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
        u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
                          CXL_HDM_DECODER0_CTRL_TYPE);
}

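/*
 * Pack up to 8 downstream port ids into the 64-bit target list register
 * image, one byte per interleave way.
 */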
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
        struct cxl_dport **t = &cxlsd->target[0];
        int ways = cxlsd->cxld.interleave_ways;

        if (dev_WARN_ONCE(&cxlsd->cxld.dev,
                          ways > 8 || ways > cxlsd->nr_targets,
                          "ways: %d overflows targets: %d\n", ways,
                          cxlsd->nr_targets))
                return -ENXIO;

        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
        if (ways > 1)
                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
        if (ways > 2)
                *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
        if (ways > 3)
                *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
        if (ways > 4)
                *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
        if (ways > 5)
                *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
        if (ways > 6)
                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
        if (ways > 7)
                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

        return 0;
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
        u32 ctrl;
        int i;

        for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
                        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                        return -EIO;
                }
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return 0;
                fsleep(1000);
        }

        return -ETIMEDOUT;
}

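/*
 * Program and commit a decoder: write the interleave, type, base and size
 * settings, the target list (switch decoders) or DPA skip (endpoint
 * decoders), then set COMMIT and poll for the COMMITTED / ERROR status.
 */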
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id, rc;
        u64 base, size;
        u32 ctrl;

        if (cxld->flags & CXL_DECODER_F_ENABLE)
                return 0;

        if (port->commit_end + 1 != id) {
                dev_dbg(&port->dev,
                        "%s: out of order commit, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end + 1);
                return -EBUSY;
        }

        down_read(&cxl_dpa_rwsem);
        /* common decoder settings */
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
        cxld_set_interleave(cxld, &ctrl);
        cxld_set_type(cxld, &ctrl);
        base = cxld->hpa_range.start;
        size = range_len(&cxld->hpa_range);

        writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

        if (is_switch_decoder(&cxld->dev)) {
                struct cxl_switch_decoder *cxlsd =
                        to_cxl_switch_decoder(&cxld->dev);
                void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
                u64 targets;

                rc = cxlsd_set_targets(cxlsd, &targets);
                if (rc) {
                        dev_dbg(&port->dev, "%s: target configuration error\n",
                                dev_name(&cxld->dev));
                        /* drop the read lock before the error path resets */
                        up_read(&cxl_dpa_rwsem);
                        goto err;
                }

                writel(upper_32_bits(targets), tl_hi);
                writel(lower_32_bits(targets), tl_lo);
        } else {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
                void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

                writel(upper_32_bits(cxled->skip), sk_hi);
                writel(lower_32_bits(cxled->skip), sk_lo);
        }

        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end++;
        rc = cxld_await_commit(hdm, cxld->id);
err:
        if (rc) {
                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
                        dev_name(&cxld->dev), rc);
                cxld->reset(cxld);
                return rc;
        }
        cxld->flags |= CXL_DECODER_F_ENABLE;

        return 0;
}

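/*
 * Reset (de-commit) a decoder: clear COMMIT and zero out the base and size
 * registers. Resets must proceed in reverse commit order.
 */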
static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id;
        u32 ctrl;

        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return 0;

        if (port->commit_end != id) {
                dev_dbg(&port->dev,
                        "%s: out of order reset, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end);
                return -EBUSY;
        }

        down_read(&cxl_dpa_rwsem);
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

        writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end--;
        cxld->flags &= ~CXL_DECODER_F_ENABLE;

        return 0;
}

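/*
 * Initialize an emulated decoder from the cached DVSEC range registers of
 * an endpoint that has no committed HDM decoder state. The decoder is
 * reported as enabled and locked because the range registers cannot yet be
 * reprogrammed at run time.
 */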
static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
                                            struct cxl_decoder *cxld, int which,
                                            struct cxl_endpoint_dvsec_info *info)
{
        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;

        if (!range_len(&info->dvsec_range[which]))
                return -ENOENT;

        cxld->target_type = CXL_DECODER_EXPANDER;
        cxld->commit = NULL;
        cxld->reset = NULL;
        cxld->hpa_range = info->dvsec_range[which];

        /*
         * Set the emulated decoder as locked pending additional support to
         * change the range registers at run time.
         */
        cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
        port->commit_end = cxld->id;

        return 0;
}

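/*
 * Read back one decoder's programmed state from the HDM registers (or fall
 * back to the DVSEC ranges for an uncommitted endpoint decoder), populate
 * the cxl_decoder, and for committed endpoint decoders reserve the implied
 * DPA span starting at *dpa_base.
 */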
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                            int *target_map, void __iomem *hdm, int which,
                            u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled = NULL;
        u64 size, base, skip, dpa_size;
        bool committed;
        u32 remainder;
        int i, rc;
        u32 ctrl;
        union {
                u64 value;
                unsigned char target_id[8];
        } target_list;

        if (is_endpoint_decoder(&cxld->dev))
                cxled = to_cxl_endpoint_decoder(&cxld->dev);

        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
        base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
        size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
        committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
        cxld->commit = cxl_decoder_commit;
        cxld->reset = cxl_decoder_reset;

        if (!committed)
                size = 0;
        if (base == U64_MAX || size == U64_MAX) {
                dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
                         port->id, cxld->id);
                return -ENXIO;
        }

        cxld->hpa_range = (struct range) {
                .start = base,
                .end = base + size - 1,
        };

        if (cxled && !committed && range_len(&info->dvsec_range[which]))
                return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);

        /* decoders are enabled if committed */
        if (committed) {
                cxld->flags |= CXL_DECODER_F_ENABLE;
                if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
                        cxld->flags |= CXL_DECODER_F_LOCK;
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
                        cxld->target_type = CXL_DECODER_EXPANDER;
                else
                        cxld->target_type = CXL_DECODER_ACCELERATOR;
                if (cxld->id != port->commit_end + 1) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed out of order\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }
                port->commit_end = cxld->id;
        } else {
                /* unless / until type-2 drivers arrive, assume type-3 */
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
                        ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
                }
                cxld->target_type = CXL_DECODER_EXPANDER;
        }
        rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
                          &cxld->interleave_ways);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }
        rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
                                 &cxld->interleave_granularity);
        if (rc)
                return rc;

        if (!cxled) {
                target_list.value =
                        ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
                for (i = 0; i < cxld->interleave_ways; i++)
                        target_map[i] = target_list.target_id[i];

                return 0;
        }

        if (!committed)
                return 0;

        dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
        if (remainder) {
                dev_err(&port->dev,
                        "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
                        port->id, cxld->id, size, cxld->interleave_ways);
                return -ENXIO;
        }
        skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
        rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base,
                        *dpa_base + dpa_size + skip - 1, rc);
                return rc;
        }
        *dpa_base += dpa_size + skip;
        return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int committed, i;
        u32 ctrl;

        if (!hdm)
                return;

        /*
         * Since the register resource was recently claimed via request_region(),
         * be careful about trusting the "not-committed" status until the commit
         * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
         * 8.2.5.12.20), but double it to be tolerant of any clock skew between
         * host and target.
         */
        for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
                        committed++;
        }

        /* ensure that future checks of committed can be trusted */
        if (committed != cxlhdm->decoder_count)
                msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
                                struct cxl_endpoint_dvsec_info *info)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        int i;
        u64 dpa_base = 0;

        cxl_settle_decoders(cxlhdm);

        for (i = 0; i < cxlhdm->decoder_count; i++) {
                int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
                int rc, target_count = cxlhdm->target_count;
                struct cxl_decoder *cxld;

                if (is_cxl_endpoint(port)) {
                        struct cxl_endpoint_decoder *cxled;

                        cxled = cxl_endpoint_decoder_alloc(port);
                        if (IS_ERR(cxled)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate the decoder\n");
                                return PTR_ERR(cxled);
                        }
                        cxld = &cxled->cxld;
                } else {
                        struct cxl_switch_decoder *cxlsd;

                        cxlsd = cxl_switch_decoder_alloc(port, target_count);
                        if (IS_ERR(cxlsd)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate the decoder\n");
                                return PTR_ERR(cxlsd);
                        }
                        cxld = &cxlsd->cxld;
                }

                rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
                                      &dpa_base, info);
                if (rc) {
                        put_device(&cxld->dev);
                        return rc;
                }
                rc = add_hdm_decoder(port, cxld, target_map);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to add decoder to port\n");
                        return rc;
                }
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);