PCI: dwc: Fix dw_pcie_ops NULL pointer dereference
drivers/pci/dwc/pcie-designware-host.c [linux-block.git]
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

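/*
 * Accessors for the host's own (Root Port) configuration space. A platform
 * driver can override these via pp->ops; the default is a direct read or
 * write of the DBI register space.
 */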
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI interrupt handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	u32 status;
	unsigned long val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    &status);
		if (!status)
			continue;

		/*
		 * Widen the 32-bit status word for find_next_bit(); reading
		 * 4 bytes through a cast of &val would leave the upper half
		 * of a 64-bit unsigned long uninitialized.
		 */
		val = status;
		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit(&val, 32, pos)) != 32) {
			irq = irq_find_mapping(pp->irq_domain,
					       i * 32 + pos);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					    i * 12, 4, 1 << pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);

	/* Program the MSI target address */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}

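/*
 * Each MSI controller services 32 vectors, and its per-controller
 * ENABLE/STATUS registers are laid out at a 12-byte stride, hence the
 * "(irq / 32) * 12" offset arithmetic in the helpers below.
 */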
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

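/*
 * Allocate a power-of-two aligned block of no_irqs MSI vectors from the
 * port's bitmap and bind them to @desc. Returns the Linux IRQ number of
 * the first vector, or -ENOSPC if no free region is left.
 */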
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp;

	pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

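/*
 * Compose and write the MSI message for @irq: the address is the MSI
 * target programmed in dw_pcie_msi_init() (or a platform-supplied one),
 * and the data is the vector's position in the controller bitmap.
 */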
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

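/*
 * struct msi_controller callbacks. dw_pcie_host_init() hands this chip to
 * pci_scan_root_bus_msi(), so the PCI core invokes these whenever an
 * endpoint driver allocates or frees MSI vectors.
 */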
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

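/*
 * dw_pcie_host_init() - map resources, set up the MSI machinery, and
 * enumerate the bus. Config space may come from a named "config" reg
 * entry, from an untyped range in "ranges", or be mapped in advance by
 * the platform driver (va_cfg0_base/va_cfg1_base already set).
 */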
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win, *tmp;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) / 2;
		pp->cfg1_size = resource_size(cfg_res) / 2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &res);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) / 2;
			pp->cfg1_size = resource_size(pp->cfg) / 2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
					     resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "failed to map DBI registers\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "failed to map cfg0 space\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "failed to map cfg1 space\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = dev;
	} else {
		bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	}
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* Support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}

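/*
 * Accesses to devices below the root port go through iATU viewport 1,
 * which is retargeted for each config transaction: CFG0 for the root
 * port's immediate child bus, CFG1 for everything deeper. With only two
 * viewports, index 1 is shared with the I/O window, so the I/O mapping
 * is restored after every access.
 */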
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

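/*
 * Cores with the iATU "unroll" register layout have no viewport select
 * register, so a read of PCIE_ATU_VIEWPORT comes back as all ones.
 */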
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

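/*
 * dw_pcie_setup_rc() - program the Root Complex side of the core: RC BARs,
 * interrupt pin, bus numbers, command register, and the outbound iATU
 * windows (unless the platform does its own address translation).
 */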
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* Set up RC BAR0 as a 64-bit memory BAR (BAR1 holds the upper half) */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Set up the interrupt pin: INTA */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Set up bus numbers: primary 0, secondary 1, subordinate 1 */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Set up the command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	/* Set the direct speed change bit so the link retrains to max speed */
	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}