PCI: dwc: designware: Move register defines to designware header file
[linux-2.6-block.git] drivers/pci/dwc/pci-dra7xx.c
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN             0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN         0x0028
#define ERR_SYS                                         BIT(0)
#define ERR_FATAL                                       BIT(1)
#define ERR_NONFATAL                                    BIT(2)
#define ERR_COR                                         BIT(3)
#define ERR_AXI                                         BIT(4)
#define ERR_ECRC                                        BIT(5)
#define PME_TURN_OFF                                    BIT(8)
#define PME_TO_ACK                                      BIT(9)
#define PM_PME                                          BIT(10)
#define LINK_REQ_RST                                    BIT(11)
#define LINK_UP_EVT                                     BIT(12)
#define CFG_BME_EVT                                     BIT(13)
#define CFG_MSE_EVT                                     BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
                    ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
                    LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI              0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI          0x0038
#define INTA                                            BIT(0)
#define INTB                                            BIT(1)
#define INTC                                            BIT(2)
#define INTD                                            BIT(3)
#define MSI                                             BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD                 0x0104
#define LTSSM_EN                                        0x1

#define PCIECTRL_DRA7XX_CONF_PHY_CS                     0x010C
#define LINK_UP                                         BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR                          0x0FFFFFFF

#define EXP_CAP_ID_OFFSET                               0x70

struct dra7xx_pcie {
        struct pcie_port        pp;
        void __iomem            *base;          /* DT ti_conf */
        int                     phy_count;      /* DT phy-names count */
        struct phy              **phy;
        int                     link_gen;
        struct irq_domain       *irq_domain;
};

#define to_dra7xx_pcie(x)       container_of((x), struct dra7xx_pcie, pp)

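/* Read/write helpers for the DRA7xx PCIe wrapper ("ti_conf") register space */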
static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
        return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
                                      u32 value)
{
        writel(value, pcie->base + offset);
}

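/* The wrapper reports link state in the LINK_UP bit of the PHY_CS register */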
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
        u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

        return !!(reg & LINK_UP);
}

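/*
 * Optionally cap the link at Gen1 (2.5 GT/s) via LNKCAP/LNKCTL2, then set
 * LTSSM_EN to start link training and wait for the link to come up.
 */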
static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
{
        struct pcie_port *pp = &dra7xx->pp;
        struct device *dev = pp->dev;
        u32 reg;
        u32 exp_cap_off = EXP_CAP_ID_OFFSET;

        if (dw_pcie_link_up(pp)) {
                dev_err(dev, "link is already up\n");
                return 0;
        }

        if (dra7xx->link_gen == 1) {
                dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
                                 4, &reg);
                if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
                        reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
                        reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
                        dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
                                          PCI_EXP_LNKCAP, 4, reg);
                }

                dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
                                 2, &reg);
                if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
                        reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
                        reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
                        dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
                                          PCI_EXP_LNKCTL2, 2, reg);
                }
        }

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg |= LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

        return dw_pcie_wait_for_link(pp);
}

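/* Program the wrapper IRQSTATUS/IRQENABLE registers for the MAIN and MSI lines */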
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
                           ~INTERRUPTS);
        dra7xx_pcie_writel(dra7xx,
                           PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
                           ~LEG_EP_INTERRUPTS & ~MSI);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
                           MSI | LEG_EP_INTERRUPTS);
}

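/*
 * Host init callback: convert the CPU-side window addresses to bus addresses,
 * set up the root complex, bring up the link, and enable MSI and wrapper
 * interrupts.
 */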
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);

        pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
        pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
        pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
        pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;

        dw_pcie_setup_rc(pp);

        dra7xx_pcie_establish_link(dra7xx);
        dw_pcie_msi_init(pp);
        dra7xx_pcie_enable_interrupts(dra7xx);
}

static struct pcie_host_ops dra7xx_pcie_host_ops = {
        .link_up = dra7xx_pcie_link_up,
        .host_init = dra7xx_pcie_host_init,
};

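/* Legacy INTx interrupts are handed to a dummy chip with the simple handler */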
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
                                irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);

        return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
        .map = dra7xx_pcie_intx_map,
};

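/*
 * Create a 4-interrupt linear domain for INTA-INTD from the first child node
 * of the controller (the interrupt controller node).
 */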
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
        struct device *dev = pp->dev;
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
                return -ENODEV;
        }

        dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
                                                   &intx_domain_ops, pp);
        if (!dra7xx->irq_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
                return -ENODEV;
        }

        return 0;
}

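/*
 * Demultiplex the shared MSI/legacy line: MSI goes to the DesignWare MSI
 * handler, INTA-INTD to the INTx IRQ domain.
 */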
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
        struct dra7xx_pcie *dra7xx = arg;
        struct pcie_port *pp = &dra7xx->pp;
        u32 reg;

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

        switch (reg) {
        case MSI:
                dw_handle_msi_irq(pp);
                break;
        case INTA:
        case INTB:
        case INTC:
        case INTD:
                generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
                                                    ffs(reg)));
                break;
        }

        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

        return IRQ_HANDLED;
}

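/*
 * Main wrapper interrupt: log error and power-management events at debug
 * level and ack them in IRQSTATUS_MAIN.
 */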
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
        struct dra7xx_pcie *dra7xx = arg;
        struct device *dev = dra7xx->pp.dev;
        u32 reg;

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

        if (reg & ERR_SYS)
                dev_dbg(dev, "System Error\n");

        if (reg & ERR_FATAL)
                dev_dbg(dev, "Fatal Error\n");

        if (reg & ERR_NONFATAL)
                dev_dbg(dev, "Non Fatal Error\n");

        if (reg & ERR_COR)
                dev_dbg(dev, "Correctable Error\n");

        if (reg & ERR_AXI)
                dev_dbg(dev, "AXI tag lookup fatal Error\n");

        if (reg & ERR_ECRC)
                dev_dbg(dev, "ECRC Error\n");

        if (reg & PME_TURN_OFF)
                dev_dbg(dev,
                        "Power Management Event Turn-Off message received\n");

        if (reg & PME_TO_ACK)
                dev_dbg(dev,
                        "Power Management Turn-Off Ack message received\n");

        if (reg & PM_PME)
                dev_dbg(dev, "PM Power Management Event message received\n");

        if (reg & LINK_REQ_RST)
                dev_dbg(dev, "Link Request Reset\n");

        if (reg & LINK_UP_EVT)
                dev_dbg(dev, "Link-up state change\n");

        if (reg & CFG_BME_EVT)
                dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

        if (reg & CFG_MSE_EVT)
                dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

        return IRQ_HANDLED;
}

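/*
 * Request the MSI interrupt, create the INTx domain, map the root complex
 * DBI space ("rc_dbics") and register the host bridge with the DesignWare
 * core.
 */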
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
                                       struct platform_device *pdev)
{
        int ret;
        struct pcie_port *pp = &dra7xx->pp;
        struct device *dev = pp->dev;
        struct resource *res;

        pp->irq = platform_get_irq(pdev, 1);
        if (pp->irq < 0) {
                dev_err(dev, "missing IRQ resource\n");
                return -EINVAL;
        }

        ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
                               IRQF_SHARED | IRQF_NO_THREAD,
                               "dra7-pcie-msi", dra7xx);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                return ret;
        }

        ret = dra7xx_pcie_init_irq_domain(pp);
        if (ret < 0)
                return ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
        pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
        if (!pp->dbi_base)
                return -ENOMEM;

        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(dev, "failed to initialize host\n");
                return ret;
        }

        return 0;
}

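/*
 * Power the PCIe PHYs up/down; on an enable failure, unwind the PHYs that
 * were already initialized.
 */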
static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
        int phy_count = dra7xx->phy_count;

        while (phy_count--) {
                phy_power_off(dra7xx->phy[phy_count]);
                phy_exit(dra7xx->phy[phy_count]);
        }
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
        int phy_count = dra7xx->phy_count;
        int ret;
        int i;

        for (i = 0; i < phy_count; i++) {
                ret = phy_init(dra7xx->phy[i]);
                if (ret < 0)
                        goto err_phy;

                ret = phy_power_on(dra7xx->phy[i]);
                if (ret < 0) {
                        phy_exit(dra7xx->phy[i]);
                        goto err_phy;
                }
        }

        return 0;

err_phy:
        while (--i >= 0) {
                phy_power_off(dra7xx->phy[i]);
                phy_exit(dra7xx->phy[i]);
        }

        return ret;
}

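/*
 * Probe: request the main interrupt, map the "ti_conf" wrapper registers,
 * acquire and enable the PHYs, enable runtime PM, drive the optional reset
 * GPIO high, clear LTSSM_EN, read the maximum link speed from DT and
 * register the PCIe port.
 */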
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
        u32 reg;
        int ret;
        int irq;
        int i;
        int phy_count;
        struct phy **phy;
        void __iomem *base;
        struct resource *res;
        struct dra7xx_pcie *dra7xx;
        struct pcie_port *pp;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        char name[10];
        struct gpio_desc *reset;

        dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
        if (!dra7xx)
                return -ENOMEM;

        pp = &dra7xx->pp;
        pp->dev = dev;
        pp->ops = &dra7xx_pcie_host_ops;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "missing IRQ resource\n");
                return -EINVAL;
        }

        ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
                               IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                return ret;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
        base = devm_ioremap_nocache(dev, res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        phy_count = of_property_count_strings(np, "phy-names");
        if (phy_count < 0) {
                dev_err(dev, "unable to find the strings\n");
                return phy_count;
        }

        phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
        if (!phy)
                return -ENOMEM;

        for (i = 0; i < phy_count; i++) {
                snprintf(name, sizeof(name), "pcie-phy%d", i);
                phy[i] = devm_phy_get(dev, name);
                if (IS_ERR(phy[i]))
                        return PTR_ERR(phy[i]);
        }

        dra7xx->base = base;
        dra7xx->phy = phy;
        dra7xx->phy_count = phy_count;

        ret = dra7xx_pcie_enable_phy(dra7xx);
        if (ret) {
                dev_err(dev, "failed to enable phy\n");
                return ret;
        }

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "pm_runtime_get_sync failed\n");
                goto err_get_sync;
        }

        reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
        if (IS_ERR(reset)) {
                ret = PTR_ERR(reset);
                dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
                goto err_gpio;
        }

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg &= ~LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

        dra7xx->link_gen = of_pci_get_max_link_speed(np);
        if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
                dra7xx->link_gen = 2;

        ret = dra7xx_add_pcie_port(dra7xx, pdev);
        if (ret < 0)
                goto err_gpio;

        platform_set_drvdata(pdev, dra7xx);
        return 0;

err_gpio:
        pm_runtime_put(dev);

err_get_sync:
        pm_runtime_disable(dev);
        dra7xx_pcie_disable_phy(dra7xx);

        return ret;
}

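/*
 * System PM: suspend/resume toggle the Memory Space Enable bit in the root
 * port's PCI_COMMAND register; the noirq callbacks power the PHYs down and
 * back up.
 */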
#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        struct pcie_port *pp = &dra7xx->pp;
        u32 val;

        /* clear MSE */
        val = dw_pcie_readl_rc(pp, PCI_COMMAND);
        val &= ~PCI_COMMAND_MEMORY;
        dw_pcie_writel_rc(pp, PCI_COMMAND, val);

        return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        struct pcie_port *pp = &dra7xx->pp;
        u32 val;

        /* set MSE */
        val = dw_pcie_readl_rc(pp, PCI_COMMAND);
        val |= PCI_COMMAND_MEMORY;
        dw_pcie_writel_rc(pp, PCI_COMMAND, val);

        return 0;
}

static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

        dra7xx_pcie_disable_phy(dra7xx);

        return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        int ret;

        ret = dra7xx_pcie_enable_phy(dra7xx);
        if (ret) {
                dev_err(dev, "failed to enable phy\n");
                return ret;
        }

        return 0;
}
#endif

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
                                      dra7xx_pcie_resume_noirq)
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
        { .compatible = "ti,dra7-pcie", },
        {},
};

static struct platform_driver dra7xx_pcie_driver = {
        .driver = {
                .name = "dra7-pcie",
                .of_match_table = of_dra7xx_pcie_match,
                .suppress_bind_attrs = true,
                .pm = &dra7xx_pcie_pm_ops,
        },
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);