// SPDX-License-Identifier: GPL-2.0+
/*
 * PCI <-> OF mapping helpers
 *
 * Copyright 2011 IBM Corp.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include "pci.h"

void pci_set_of_node(struct pci_dev *dev)
{
	if (!dev->bus->dev.of_node)
		return;
	dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
						    dev->devfn);
}

void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	dev->dev.of_node = NULL;
}

void pci_set_bus_of_node(struct pci_bus *bus)
{
	if (bus->self == NULL)
		bus->dev.of_node = pcibios_get_phb_of_node(bus);
	else
		bus->dev.of_node = of_node_get(bus->self->dev.of_node);
}

void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	bus->dev.of_node = NULL;
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer in either the intermediary device we
	 * create above the root bus or its own parent. Normally only
	 * the latter is populated.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}

struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If we don't have an msi-parent property, look for a domain
	 * directly attached to the host bridge.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}
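
/*
 * Illustrative only: the lookup above typically resolves an "msi-parent"
 * phandle on the host bridge node. A minimal sketch of such a binding,
 * using hypothetical node names and labels, might look like:
 *
 *	pcie@40000000 {
 *		compatible = "vendor,example-pcie";
 *		msi-parent = <&its>;
 *	};
 */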

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device; we descend into those as well.
		 */
		if (!strcmp(node->name, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);

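/*
 * Illustrative only: some firmwares wrap the functions of a multi-function
 * device in a fake "multifunc-device" node, which the lookup above descends
 * into. A hypothetical layout (node names made up for this sketch):
 *
 *	multifunc-device@10 {
 *		ethernet@10,0 { ... };
 *		ethernet@10,1 { ... };
 *	};
 */
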
/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);

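/*
 * Illustrative only (hypothetical property values, not from this file):
 * the first cell of a PCI "reg" entry encodes bus, device and function
 * numbers, so a child node for device 0x15, function 0 could carry
 *
 *	reg = <0x0000a800 0x0 0x0 0x0 0x0>;
 *
 * for which of_pci_get_devfn() returns 0xa8, i.e. PCI_SLOT() == 0x15 and
 * PCI_FUNC() == 0.
 */
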
/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address to a struct resource to return the bus-range
 *
 * Returns 0 on success or a negative error-code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);

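/*
 * Illustrative only: a host bridge node restricted to buses 0x00-0x0f would
 * typically carry
 *
 *	bus-range = <0x00 0x0f>;
 *
 * which the helper above turns into an IORESOURCE_BUS resource [bus 00-0f].
 */
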
/**
 * of_get_pci_domain_nr() - Get the host bridge domain number
 * @node: device tree node with the domain information
 *
 * This function will try to obtain the host bridge domain number by finding
 * a property called "linux,pci-domain" of the given device node.
 *
 * Returns the associated domain number from DT in the range [0-0xffff], or
 * a negative value if the required property is not found.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);

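/*
 * Illustrative only: firmware that wants stable domain numbering can tag
 * each host bridge node, e.g.
 *
 *	linux,pci-domain = <0>;
 *
 * in which case of_get_pci_domain_nr() returns 0 for that bridge.
 */
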
/**
 * of_pci_get_max_link_speed() - Find the maximum link speed limit
 * @node: device tree node with the max link speed information
 *
 * This function will try to find the limitation of link speed by finding
 * a property called "max-link-speed" of the given device node.
 *
 * Returns the associated max link speed from DT, or a negative value if the
 * required property is not found or is invalid.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);

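/*
 * Illustrative only: capping a hypothetical root port at PCIe Gen2 would be
 * expressed as
 *
 *	max-link-speed = <2>;
 *
 * Values greater than 4 are rejected by the helper above.
 */
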
/**
 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
 *			     is present and valid
 */
void of_pci_check_probe_only(void)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW)
			pr_warn("linux,pci-probe-only without valid value, ignoring\n");
		return;
	}

	if (val)
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);

	pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);

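/*
 * Illustrative only: firmware can ask the kernel not to reassign PCI
 * resources via the chosen node, e.g.
 *
 *	chosen {
 *		linux,pci-probe-only = <1>;
 *	};
 *
 * A value of 0 explicitly clears PCI_PROBE_ONLY instead.
 */
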
#if defined(CONFIG_OF_ADDRESS)
/**
 * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
 * @dev: device node of the host bridge having the ranges property
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 * address for the start of the I/O range. Can be NULL if the caller doesn't
 * expect I/O ranges to be present in the device tree.
 *
 * It is the caller's job to free the @resources list.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and set up the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
int of_pci_get_host_bridge_resources(struct device_node *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources, resource_size_t *io_base)
{
	struct resource_entry *window;
	struct resource *res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	char range_type[4];
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	pr_info("host bridge %pOF ranges:\n", dev);

	err = of_pci_parse_bus_range(dev, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		pr_info(" No bus range found for %pOF, using %pR\n",
			dev, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev);
	if (err)
		goto parse_failed;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			snprintf(range_type, 4, " IO");
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			snprintf(range_type, 4, "MEM");
		else
			snprintf(range_type, 4, "err");
		pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
			range.cpu_addr, range.cpu_addr + range.size - 1,
			range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto parse_failed;
		}

		err = of_pci_range_to_resource(&range, dev, res);
		if (err) {
			kfree(res);
			continue;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev);
				err = -EINVAL;
				goto conversion_failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					dev);
			*io_base = range.cpu_addr;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	return 0;

conversion_failed:
	kfree(res);
parse_failed:
	resource_list_for_each_entry(window, resources)
		kfree(window->res);
	pci_free_resource_list(resources);
	return err;
}
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
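
/*
 * Illustrative only: a "ranges" property consumed by the parser above could
 * describe one I/O and one 32-bit memory window, e.g.
 *
 *	ranges = <0x01000000 0x0 0x00000000  0x0 0x4f000000  0x0 0x00010000>,
 *		 <0x02000000 0x0 0x50000000  0x0 0x50000000  0x0 0x10000000>;
 *
 * Cell counts depend on the #address-cells/#size-cells of the node and its
 * parent; the addresses and sizes here are hypothetical.
 */
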
#endif /* CONFIG_OF_ADDRESS */

/**
 * of_pci_map_rid - Translate a requester ID through a downstream mapping.
 * @np: root complex device node.
 * @rid: PCI requester ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a PCI requester ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_pci_map_rid(struct device_node *np, u32 rid,
		   const char *map_name, const char *map_mask_name,
		   struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_rid;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = rid;
		return 0;
	}

	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_rid = map_mask & rid;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 rid_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 rid_len = be32_to_cpup(map + 3);

		if (rid_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, rid_base);
			return -EFAULT;
		}

		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_rid - rid_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
			np, map_name, map_mask, rid_base, out_base,
			rid_len, rid, masked_rid - rid_base + out_base);
		return 0;
	}

	pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
		np, map_name, rid, target && *target ? *target : NULL);
	return -EFAULT;
}

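/*
 * Illustrative only: a root complex node can route all requester IDs to an
 * MSI controller while offsetting the output IDs, e.g. with a hypothetical
 * &its label:
 *
 *	msi-map = <0x0000 &its 0x10000 0x10000>;
 *
 * of_pci_map_rid(np, rid, "msi-map", "msi-map-mask", NULL, &id) would then
 * translate RID 0x0008 to ID 0x10008 at the node referenced by &its.
 */
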
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev: the device whose interrupt is to be resolved
 * @out_irq: structure of_irq filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until a device-node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node; if yes, fall back to standard
	 * device tree parsing.
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * Ok, we don't, time to have fun. Let's start by building up an
	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
	 * for PCI. If your firmware does something different, don't use this
	 * routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Now we walk up the PCI tree */
	for (;;) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for the host bridge? Give up. */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * Ok, we have found a parent with a device-node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the linux device to be used for
		 * resolution. Note that we use the linux bus number which may
		 * not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that though if you intend
		 * to rely on this function (i.e. you ship a firmware that
		 * doesn't create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
 *
 * @slot and @pin are unused, but included in the signature so that this
 * function can be used directly as the map_irq callback to pci_assign_irq()
 * and as the struct pci_host_bridge.map_irq pointer.
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
#endif /* CONFIG_OF_IRQ */

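/*
 * Illustrative only: a hypothetical DT-based host controller driver would
 * normally plug the helper above into its bridge, e.g.
 *
 *	bridge->map_irq = of_irq_parse_and_map_pci;
 *	bridge->swizzle_irq = pci_common_swizzle;
 *
 * so that INTx lines get mapped when devices are scanned.
 */
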
int pci_parse_request_of_pci_ranges(struct device *dev,
				    struct list_head *resources,
				    struct resource **bus_range)
{
	int err, res_valid = 0;
	struct device_node *np = dev->of_node;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(resources);
	err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, resources);
	if (err)
		goto out_release_res;

	resource_list_for_each_entry_safe(win, tmp, resources) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = pci_remap_iospace(res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
			break;
		case IORESOURCE_BUS:
			if (bus_range)
				*bus_range = res;
			break;
		}
	}

	if (res_valid)
		return 0;

	dev_err(dev, "non-prefetchable memory resource required\n");
	err = -EINVAL;

 out_release_res:
	pci_free_resource_list(resources);
	return err;
}
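
/*
 * Illustrative only: a minimal probe() sketch for a hypothetical DT host
 * bridge driver using the helper above might look like
 *
 *	LIST_HEAD(resources);
 *	struct resource *bus_range = NULL;
 *	int ret;
 *
 *	ret = pci_parse_request_of_pci_ranges(&pdev->dev, &resources,
 *					      &bus_range);
 *	if (ret)
 *		return ret;
 *	... set up the host bridge windows from the resources list ...
 */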