PCI: dra7xx: Enable MSI and legacy interrupts simultaneously
[linux-2.6-block.git] drivers/pci/dwc/pci-dra7xx.c
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define ERR_SYS						BIT(0)
#define ERR_FATAL					BIT(1)
#define ERR_NONFATAL					BIT(2)
#define ERR_COR						BIT(3)
#define ERR_AXI						BIT(4)
#define ERR_ECRC					BIT(5)
#define PME_TURN_OFF					BIT(8)
#define PME_TO_ACK					BIT(9)
#define PM_PME						BIT(10)
#define LINK_REQ_RST					BIT(11)
#define LINK_UP_EVT					BIT(12)
#define CFG_BME_EVT					BIT(13)
#define CFG_MSE_EVT					BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define INTA						BIT(0)
#define INTB						BIT(1)
#define INTC						BIT(2)
#define INTD						BIT(3)
#define MSI						BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define LTSSM_EN					0x1

#define PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define LINK_UP						BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define EXP_CAP_ID_OFFSET				0x70

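/*
 * struct dra7xx_pcie - driver state for one DRA7xx PCIe instance
 * @pp:		DesignWare PCIe host; to_dra7xx_pcie() recovers this wrapper
 * @base:	TI wrapper configuration registers (DT "ti_conf" region)
 * @phy_count:	number of PHYs listed in the DT "phy-names" property
 * @phy:	the lane PHYs themselves
 * @link_gen:	1 restricts the link to Gen1; anything else is treated as
 *		Gen2 (taken from the DT "max-link-speed" property)
 * @irq_domain:	linear IRQ domain used to deliver legacy INTA..INTD
 */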
struct dra7xx_pcie {
	struct pcie_port	pp;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
};

#define to_dra7xx_pcie(x)	container_of((x), struct dra7xx_pcie, pp)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

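/*
 * Bring up the link: when DT limits the port to Gen1, both the advertised
 * Link Capabilities and the Target Link Speed (LNKCTL2) are capped to
 * 2.5 GT/s first; then LTSSM_EN is set in the wrapper DEVICE_CMD register
 * and the DesignWare core helper polls for link-up.
 */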
static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
{
	struct pcie_port *pp = &dra7xx->pp;
	struct device *dev = pp->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pp)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
				 4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
					  PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
				 2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
					  PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return dw_pcie_wait_for_link(pp);
}

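/*
 * Clear any stale status and unmask both interrupt groups: the MAIN set
 * (errors, PM and link events) and the MSI set. MSI and legacy INTA..INTD
 * are enabled together here, so endpoints using either signalling method
 * can be serviced at the same time.
 */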
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   ~INTERRUPTS);
	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   ~LEG_EP_INTERRUPTS & ~MSI);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

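/*
 * Called back from dw_pcie_host_init(): the 0x0FFFFFFF mask keeps only the
 * low 28 bits of each outbound window base, converting the CPU-side window
 * addresses into the addresses to be emitted on the PCI bus, before the
 * root complex is set up, the link trained and interrupts enabled.
 */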
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);

	pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
	pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
	pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
	pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(dra7xx);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);
}

static struct pcie_host_ops dra7xx_pcie_host_ops = {
	.link_up = dra7xx_pcie_link_up,
	.host_init = dra7xx_pcie_host_init,
};

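/*
 * Legacy INTA..INTD support: each virtual IRQ is given dummy_irq_chip and
 * handle_simple_irq (no per-interrupt mask/ack is done at this level), and
 * a 4-entry linear domain is built over the RC's child interrupt-controller
 * node (the first DT child is used) so clients can reference the four
 * legacy interrupts.
 */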
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
};

static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct device *dev = pp->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
						   &intx_domain_ops, pp);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

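/*
 * Second (MSI) interrupt line: IRQSTATUS_MSI reports either the MSI bit or
 * one of INTA..INTD. MSIs are passed to the DesignWare MSI dispatcher;
 * legacy interrupts are forwarded through the INTx domain, with ffs()
 * turning the status bit back into a hwirq number. The status is written
 * back afterwards to acknowledge the event.
 */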
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct pcie_port *pp = &dra7xx->pp;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
						    ffs(reg)));
		break;
	}

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}

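/*
 * First (main) interrupt line: log whichever wrapper events are pending
 * (errors, PM messages, link and config-space changes) at debug level and
 * acknowledge them by writing the status back.
 */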
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct device *dev = dra7xx->pp.dev;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT)
		dev_dbg(dev, "Link-up state change\n");

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

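/*
 * Host bringup: the second platform IRQ is the MSI/INTx line handled above,
 * the "rc_dbics" region becomes the DesignWare dbi_base, and
 * dw_pcie_host_init() then enumerates the bus, invoking
 * dra7xx_pcie_host_init() via the host_init op along the way.
 */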
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct pcie_port *pp = &dra7xx->pp;
	struct device *dev = pp->dev;
	struct resource *res;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!pp->dbi_base)
		return -ENOMEM;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

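/*
 * probe() pulls all of its resources from devicetree. A rough sketch of the
 * node it expects is shown below; it is illustrative only (addresses, IRQ
 * specifiers and the full reg-names list are placeholders -- the
 * ti,dra7-pcie binding document is authoritative):
 *
 *	pcie@51000000 {
 *		compatible = "ti,dra7-pcie";
 *		reg = <...>, <...>;
 *		reg-names = "rc_dbics", "ti_conf";
 *		interrupts = <... main ...>, <... msi ...>;
 *		phys = <&pcie1_phy>;
 *		phy-names = "pcie-phy0";
 *		max-link-speed = <2>;
 *		pcie_intc: interrupt-controller {
 *			interrupt-controller;
 *			#interrupt-cells = <1>;
 *		};
 *	};
 */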
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	void __iomem *base;
	struct resource *res;
	struct dra7xx_pcie *dra7xx;
	struct pcie_port *pp;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pp = &dra7xx->pp;
	pp->dev = dev;
	pp->ops = &dra7xx_pcie_host_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		ret = phy_init(phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(phy[i]);
		if (ret < 0) {
			phy_exit(phy[i]);
			goto err_phy;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->phy_count = phy_count;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	ret = dra7xx_add_pcie_port(dra7xx, pdev);
	if (ret < 0)
		goto err_gpio;

	platform_set_drvdata(pdev, dra7xx);
	return 0;

err_gpio:
	pm_runtime_put(dev);

err_get_sync:
	pm_runtime_disable(dev);

err_phy:
	while (--i >= 0) {
		phy_power_off(phy[i]);
		phy_exit(phy[i]);
	}

	return ret;
}

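/*
 * System sleep: the regular suspend/resume callbacks only toggle Memory
 * Space Enable in the RC's PCI_COMMAND register, while the noirq phase
 * powers the lane PHYs off and back on, mirroring the probe-time
 * phy_init()/phy_power_on() sequence.
 */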
#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct pcie_port *pp = &dra7xx->pp;
	u32 val;

	/* clear MSE */
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_rc(pp, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct pcie_port *pp = &dra7xx->pp;
	u32 val;

	/* set MSE */
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_rc(pp, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int count = dra7xx->phy_count;

	while (count--) {
		phy_power_off(dra7xx->phy[count]);
		phy_exit(dra7xx->phy[count]);
	}

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}
#endif

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{ .compatible = "ti,dra7-pcie", },
	{},
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);