PCI: dra7xx: Fix legacy INTD IRQ handling
drivers/pci/dwc/pci-dra7xx.c

/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define ERR_SYS						BIT(0)
#define ERR_FATAL					BIT(1)
#define ERR_NONFATAL					BIT(2)
#define ERR_COR						BIT(3)
#define ERR_AXI						BIT(4)
#define ERR_ECRC					BIT(5)
#define PME_TURN_OFF					BIT(8)
#define PME_TO_ACK					BIT(9)
#define PM_PME						BIT(10)
#define LINK_REQ_RST					BIT(11)
#define LINK_UP_EVT					BIT(12)
#define CFG_BME_EVT					BIT(13)
#define CFG_MSE_EVT					BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define INTA						BIT(0)
#define INTB						BIT(1)
#define INTC						BIT(2)
#define INTD						BIT(3)
#define MSI						BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define DEVICE_TYPE_EP					0x0
#define DEVICE_TYPE_LEG_EP				0x1
#define DEVICE_TYPE_RC					0x4

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define LTSSM_EN					0x1

#define PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define LINK_UP						BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define EXP_CAP_ID_OFFSET				0x70

#define PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

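/* Only the low 28 bits of a CPU address are used as the PCI bus address. */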
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

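/*
 * Optionally cap the link to 2.5 GT/s (Gen1) in LNKCAP and LNKCTL2, then
 * set LTSSM_EN so the LTSSM starts link training.
 */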
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

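/* Clear any pending MSI/INTx status, then unmask both in the wrapper. */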
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};

static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

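/*
 * IRQSTATUS_MSI reports both MSI and legacy INTA-INTD events; MSIs are
 * passed to the DesignWare MSI handler, while each INTx bit is translated
 * to hwirq 0-3 (ffs(reg) - 1) and dispatched through the INTx domain.
 */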
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
						    ffs(reg) - 1));
		break;
	}

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}

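/*
 * Main wrapper interrupt: error and power-management events are logged at
 * debug level; a link-up event additionally notifies the DesignWare EP core
 * when running in endpoint mode.
 */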
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

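/* Signal a legacy interrupt: assert INTx, hold for 1 ms, then deassert. */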
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
				 enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};

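/*
 * Endpoint mode: map the "ep_dbics"/"ep_dbics2" register spaces and the
 * outbound "addr_space" window, then register with the DesignWare EP core.
 */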
static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base2)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

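/*
 * Host mode: request the MSI/INTx interrupt, create the legacy INTx domain,
 * map the "rc_dbics" register space and register the root port with the
 * DesignWare host core.
 */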
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base)
		return -ENOMEM;

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};

/*
 * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device where the workaround should be applied
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

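/*
 * Probe: pick RC or EP mode from the compatible string, bring up the PHYs
 * and runtime PM, program the wrapper's device type and then register the
 * corresponding DesignWare host or endpoint.
 */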
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct resource *res;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource: %d\n", irq);
		return irq;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

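	/* Make sure the LTSSM is disabled until the controller is configured. */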
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

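	/* DT "max-link-speed"; anything outside Gen1/Gen2 falls back to Gen2. */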
	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);
		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
	pm_runtime_put(dev);

err_get_sync:
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
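/*
 * Host-mode system sleep: suspend clears the Memory Space Enable bit in
 * PCI_COMMAND and resume restores it; the noirq callbacks power the PHYs
 * off and back on.
 */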
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

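/* Stop the link, drop the runtime PM reference and power the PHYs off. */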
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name = "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm = &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);