/* drivers/pci/dwc/pci-dra7xx.c */
1/*
2 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
3 *
4 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
608793e2 13#include <linux/delay.h>
47ff3de9
KVA
14#include <linux/err.h>
15#include <linux/interrupt.h>
16#include <linux/irq.h>
17#include <linux/irqdomain.h>
18#include <linux/kernel.h>
d29438d6 19#include <linux/init.h>
608793e2 20#include <linux/of_device.h>
78bdcad0 21#include <linux/of_gpio.h>
ab5fe4f4 22#include <linux/of_pci.h>
47ff3de9
KVA
23#include <linux/pci.h>
24#include <linux/phy/phy.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/resource.h>
28#include <linux/types.h>
f7a2757f
KVA
29#include <linux/mfd/syscon.h>
30#include <linux/regmap.h>
47ff3de9
KVA
31
32#include "pcie-designware.h"
33
/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN	0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
/* All "main" wrapper events the driver wants reported */
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI	0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define	PCIECTRL_TI_CONF_DEVICE_TYPE		0x0100
#define	DEVICE_TYPE_EP				0x0
#define	DEVICE_TYPE_LEG_EP			0x1
#define	DEVICE_TYPE_RC				0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD		0x0104
#define	LTSSM_EN				0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS		0x010C
#define	LINK_UP					BIT(16)
/* Outbound CPU addresses are masked down to this bus-visible window */
#define	DRA7XX_CPU_TO_BUS_ADDR			0x0FFFFFFF

/* Fixed offset of the PCI Express capability in config space */
#define	EXP_CAP_ID_OFFSET			0x70

#define	PCIECTRL_TI_CONF_INTX_ASSERT		0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT		0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT		0x012c
#define	MSI_REQ_GRANT				BIT(0)
#define	MSI_VECTOR_SHIFT			7
84
47ff3de9 85struct dra7xx_pcie {
442ec4c0 86 struct dw_pcie *pci;
8e5ec414
BH
87 void __iomem *base; /* DT ti_conf */
88 int phy_count; /* DT phy-names count */
89 struct phy **phy;
ab5fe4f4 90 int link_gen;
ebe85a44 91 struct irq_domain *irq_domain;
608793e2
KVA
92 enum dw_pcie_device_mode mode;
93};
94
95struct dra7xx_pcie_of_data {
96 enum dw_pcie_device_mode mode;
47ff3de9
KVA
97};
98
442ec4c0 99#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
47ff3de9
KVA
100
101static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
102{
103 return readl(pcie->base + offset);
104}
105
106static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
107 u32 value)
108{
109 writel(value, pcie->base + offset);
110}
111
2ed6cc71
KVA
112static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
113{
114 return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
115}
116
442ec4c0 117static int dra7xx_pcie_link_up(struct dw_pcie *pci)
47ff3de9 118{
442ec4c0 119 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
47ff3de9
KVA
120 u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
121
122 return !!(reg & LINK_UP);
123}
124
608793e2 125static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
47ff3de9 126{
608793e2
KVA
127 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
128 u32 reg;
129
130 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
131 reg &= ~LTSSM_EN;
132 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
133}
134
135static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
136{
137 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
442ec4c0 138 struct device *dev = pci->dev;
6cbb247e 139 u32 reg;
ab5fe4f4 140 u32 exp_cap_off = EXP_CAP_ID_OFFSET;
47ff3de9 141
442ec4c0 142 if (dw_pcie_link_up(pci)) {
c7f8146b 143 dev_err(dev, "link is already up\n");
47ff3de9
KVA
144 return 0;
145 }
146
ab5fe4f4 147 if (dra7xx->link_gen == 1) {
442ec4c0 148 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
19ce01cc 149 4, &reg);
ab5fe4f4
KVA
150 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
151 reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
152 reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
442ec4c0 153 dw_pcie_write(pci->dbi_base + exp_cap_off +
19ce01cc 154 PCI_EXP_LNKCAP, 4, reg);
ab5fe4f4
KVA
155 }
156
442ec4c0 157 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
19ce01cc 158 2, &reg);
ab5fe4f4
KVA
159 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
160 reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
161 reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
442ec4c0 162 dw_pcie_write(pci->dbi_base + exp_cap_off +
19ce01cc 163 PCI_EXP_LNKCTL2, 2, reg);
ab5fe4f4
KVA
164 }
165 }
166
47ff3de9
KVA
167 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
168 reg |= LTSSM_EN;
169 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
170
608793e2 171 return 0;
47ff3de9
KVA
172}
173
5ffd90a0 174static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
47ff3de9 175{
47ff3de9 176 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
40aa52c4 177 LEG_EP_INTERRUPTS | MSI);
5ffd90a0
KVA
178
179 dra7xx_pcie_writel(dra7xx,
180 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
ebe85a44 181 MSI | LEG_EP_INTERRUPTS);
47ff3de9
KVA
182}
183
5ffd90a0
KVA
184static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
185{
186 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
40aa52c4 187 INTERRUPTS);
5ffd90a0
KVA
188 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
189 INTERRUPTS);
190}
191
/* Enable both wrapper (error/PM) and MSI/INTx interrupt sources */
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
197
/*
 * Host (RC) bring-up callback for the DesignWare core: program the root
 * complex, start link training, wait for link, then enable MSI and the
 * wrapper interrupt sources.
 */
static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);	/* best effort; link may come up later */
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}
212
4ab2e7c0 213static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
47ff3de9
KVA
214 .host_init = dra7xx_pcie_host_init,
215};
216
217static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
218 irq_hw_number_t hwirq)
219{
220 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
221 irq_set_chip_data(irq, domain->host_data);
47ff3de9
KVA
222
223 return 0;
224}
225
226static const struct irq_domain_ops intx_domain_ops = {
227 .map = dra7xx_pcie_intx_map,
228};
229
230static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
231{
442ec4c0
KVA
232 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
233 struct device *dev = pci->dev;
234 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
47ff3de9
KVA
235 struct device_node *node = dev->of_node;
236 struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
237
238 if (!pcie_intc_node) {
239 dev_err(dev, "No PCIe Intc node found\n");
991bfef8 240 return -ENODEV;
47ff3de9
KVA
241 }
242
61534d1a 243 dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
ebe85a44
KVA
244 &intx_domain_ops, pp);
245 if (!dra7xx->irq_domain) {
47ff3de9 246 dev_err(dev, "Failed to get a INTx IRQ domain\n");
991bfef8 247 return -ENODEV;
47ff3de9
KVA
248 }
249
250 return 0;
251}
252
253static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
254{
21baa1c4 255 struct dra7xx_pcie *dra7xx = arg;
442ec4c0
KVA
256 struct dw_pcie *pci = dra7xx->pci;
257 struct pcie_port *pp = &pci->pp;
47ff3de9
KVA
258 u32 reg;
259
260 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
261
262 switch (reg) {
263 case MSI:
264 dw_handle_msi_irq(pp);
265 break;
266 case INTA:
267 case INTB:
268 case INTC:
269 case INTD:
ebe85a44
KVA
270 generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
271 ffs(reg)));
47ff3de9
KVA
272 break;
273 }
274
275 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
276
277 return IRQ_HANDLED;
278}
279
47ff3de9
KVA
280static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
281{
282 struct dra7xx_pcie *dra7xx = arg;
442ec4c0
KVA
283 struct dw_pcie *pci = dra7xx->pci;
284 struct device *dev = pci->dev;
608793e2 285 struct dw_pcie_ep *ep = &pci->ep;
47ff3de9
KVA
286 u32 reg;
287
288 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
289
290 if (reg & ERR_SYS)
c7f8146b 291 dev_dbg(dev, "System Error\n");
47ff3de9
KVA
292
293 if (reg & ERR_FATAL)
c7f8146b 294 dev_dbg(dev, "Fatal Error\n");
47ff3de9
KVA
295
296 if (reg & ERR_NONFATAL)
c7f8146b 297 dev_dbg(dev, "Non Fatal Error\n");
47ff3de9
KVA
298
299 if (reg & ERR_COR)
c7f8146b 300 dev_dbg(dev, "Correctable Error\n");
47ff3de9
KVA
301
302 if (reg & ERR_AXI)
c7f8146b 303 dev_dbg(dev, "AXI tag lookup fatal Error\n");
47ff3de9
KVA
304
305 if (reg & ERR_ECRC)
c7f8146b 306 dev_dbg(dev, "ECRC Error\n");
47ff3de9
KVA
307
308 if (reg & PME_TURN_OFF)
c7f8146b 309 dev_dbg(dev,
47ff3de9
KVA
310 "Power Management Event Turn-Off message received\n");
311
312 if (reg & PME_TO_ACK)
c7f8146b 313 dev_dbg(dev,
47ff3de9
KVA
314 "Power Management Turn-Off Ack message received\n");
315
316 if (reg & PM_PME)
c7f8146b 317 dev_dbg(dev, "PM Power Management Event message received\n");
47ff3de9
KVA
318
319 if (reg & LINK_REQ_RST)
c7f8146b 320 dev_dbg(dev, "Link Request Reset\n");
47ff3de9 321
608793e2
KVA
322 if (reg & LINK_UP_EVT) {
323 if (dra7xx->mode == DW_PCIE_EP_TYPE)
324 dw_pcie_ep_linkup(ep);
c7f8146b 325 dev_dbg(dev, "Link-up state change\n");
608793e2 326 }
47ff3de9
KVA
327
328 if (reg & CFG_BME_EVT)
c7f8146b 329 dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
47ff3de9
KVA
330
331 if (reg & CFG_MSE_EVT)
c7f8146b 332 dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
47ff3de9
KVA
333
334 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
335
336 return IRQ_HANDLED;
337}
338
85aa1399
KVA
339static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
340{
341 u32 reg;
342
343 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
344 dw_pcie_writel_dbi2(pci, reg, 0x0);
345 dw_pcie_writel_dbi(pci, reg, 0x0);
346}
347
608793e2
KVA
348static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
349{
350 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
351 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
85aa1399
KVA
352 enum pci_barno bar;
353
354 for (bar = BAR_0; bar <= BAR_5; bar++)
355 dw_pcie_ep_reset_bar(pci, bar);
608793e2
KVA
356
357 dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
358}
359
360static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
361{
362 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
363 mdelay(1);
364 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
365}
366
367static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
368 u8 interrupt_num)
369{
370 u32 reg;
371
372 reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
373 reg |= MSI_REQ_GRANT;
374 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
375}
376
377static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
378 enum pci_epc_irq_type type, u8 interrupt_num)
379{
380 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
381 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
382
383 switch (type) {
384 case PCI_EPC_IRQ_LEGACY:
385 dra7xx_pcie_raise_legacy_irq(dra7xx);
386 break;
387 case PCI_EPC_IRQ_MSI:
388 dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
389 break;
390 default:
391 dev_err(pci->dev, "UNKNOWN IRQ type\n");
392 }
393
394 return 0;
395}
396
397static struct dw_pcie_ep_ops pcie_ep_ops = {
398 .ep_init = dra7xx_pcie_ep_init,
399 .raise_irq = dra7xx_pcie_raise_irq,
400};
401
402static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
403 struct platform_device *pdev)
404{
405 int ret;
406 struct dw_pcie_ep *ep;
407 struct resource *res;
408 struct device *dev = &pdev->dev;
409 struct dw_pcie *pci = dra7xx->pci;
410
411 ep = &pci->ep;
412 ep->ops = &pcie_ep_ops;
413
414 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
415 pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
416 if (!pci->dbi_base)
417 return -ENOMEM;
418
419 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
420 pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
421 if (!pci->dbi_base2)
422 return -ENOMEM;
423
424 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
425 if (!res)
426 return -EINVAL;
427
428 ep->phys_base = res->start;
429 ep->addr_size = resource_size(res);
430
431 ret = dw_pcie_ep_init(ep);
432 if (ret) {
433 dev_err(dev, "failed to initialize endpoint\n");
434 return ret;
435 }
436
437 return 0;
438}
439
e73044a0
JH
440static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
441 struct platform_device *pdev)
47ff3de9
KVA
442{
443 int ret;
442ec4c0
KVA
444 struct dw_pcie *pci = dra7xx->pci;
445 struct pcie_port *pp = &pci->pp;
446 struct device *dev = pci->dev;
47ff3de9 447 struct resource *res;
47ff3de9
KVA
448
449 pp->irq = platform_get_irq(pdev, 1);
450 if (pp->irq < 0) {
451 dev_err(dev, "missing IRQ resource\n");
2f3ec752 452 return pp->irq;
47ff3de9
KVA
453 }
454
c7f8146b 455 ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
8ff0ef99 456 IRQF_SHARED | IRQF_NO_THREAD,
21baa1c4 457 "dra7-pcie-msi", dra7xx);
47ff3de9 458 if (ret) {
c7f8146b 459 dev_err(dev, "failed to request irq\n");
47ff3de9
KVA
460 return ret;
461 }
462
ebe85a44
KVA
463 ret = dra7xx_pcie_init_irq_domain(pp);
464 if (ret < 0)
465 return ret;
47ff3de9
KVA
466
467 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
442ec4c0
KVA
468 pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
469 if (!pci->dbi_base)
47ff3de9
KVA
470 return -ENOMEM;
471
472 ret = dw_pcie_host_init(pp);
473 if (ret) {
c7f8146b 474 dev_err(dev, "failed to initialize host\n");
47ff3de9
KVA
475 return ret;
476 }
477
478 return 0;
479}
480
442ec4c0 481static const struct dw_pcie_ops dw_pcie_ops = {
2ed6cc71 482 .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
608793e2
KVA
483 .start_link = dra7xx_pcie_establish_link,
484 .stop_link = dra7xx_pcie_stop_link,
442ec4c0
KVA
485 .link_up = dra7xx_pcie_link_up,
486};
487
1f6c4501
KVA
488static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
489{
490 int phy_count = dra7xx->phy_count;
491
492 while (phy_count--) {
493 phy_power_off(dra7xx->phy[phy_count]);
494 phy_exit(dra7xx->phy[phy_count]);
495 }
496}
497
498static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
499{
500 int phy_count = dra7xx->phy_count;
501 int ret;
502 int i;
503
504 for (i = 0; i < phy_count; i++) {
505 ret = phy_init(dra7xx->phy[i]);
506 if (ret < 0)
507 goto err_phy;
508
509 ret = phy_power_on(dra7xx->phy[i]);
510 if (ret < 0) {
511 phy_exit(dra7xx->phy[i]);
512 goto err_phy;
513 }
514 }
515
516 return 0;
517
518err_phy:
519 while (--i >= 0) {
520 phy_power_off(dra7xx->phy[i]);
521 phy_exit(dra7xx->phy[i]);
522 }
523
524 return ret;
525}
526
608793e2
KVA
527static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
528 .mode = DW_PCIE_RC_TYPE,
529};
530
531static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
532 .mode = DW_PCIE_EP_TYPE,
533};
534
535static const struct of_device_id of_dra7xx_pcie_match[] = {
536 {
537 .compatible = "ti,dra7-pcie",
538 .data = &dra7xx_pcie_rc_of_data,
539 },
540 {
541 .compatible = "ti,dra7-pcie-ep",
542 .data = &dra7xx_pcie_ep_of_data,
543 },
544 {},
545};
546
f7a2757f
KVA
547/*
548 * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
549 * @dra7xx: the dra7xx device where the workaround should be applied
550 *
551 * Access to the PCIe slave port that are not 32-bit aligned will result
552 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
553 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
554 * 0x3.
555 *
556 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
557 */
558static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
559{
560 int ret;
561 struct device_node *np = dev->of_node;
562 struct of_phandle_args args;
563 struct regmap *regmap;
564
565 regmap = syscon_regmap_lookup_by_phandle(np,
566 "ti,syscon-unaligned-access");
567 if (IS_ERR(regmap)) {
568 dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
569 return -EINVAL;
570 }
571
572 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
573 2, 0, &args);
574 if (ret) {
575 dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
576 return ret;
577 }
578
579 ret = regmap_update_bits(regmap, args.args[0], args.args[1],
580 args.args[1]);
581 if (ret)
582 dev_err(dev, "failed to enable unaligned access\n");
583
584 of_node_put(args.np);
585
586 return ret;
587}
588
47ff3de9
KVA
589static int __init dra7xx_pcie_probe(struct platform_device *pdev)
590{
591 u32 reg;
592 int ret;
593 int irq;
594 int i;
595 int phy_count;
596 struct phy **phy;
597 void __iomem *base;
598 struct resource *res;
442ec4c0 599 struct dw_pcie *pci;
150645b9 600 struct pcie_port *pp;
442ec4c0 601 struct dra7xx_pcie *dra7xx;
47ff3de9
KVA
602 struct device *dev = &pdev->dev;
603 struct device_node *np = dev->of_node;
604 char name[10];
602d38bc 605 struct gpio_desc *reset;
608793e2
KVA
606 const struct of_device_id *match;
607 const struct dra7xx_pcie_of_data *data;
608 enum dw_pcie_device_mode mode;
609
610 match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
611 if (!match)
612 return -EINVAL;
613
614 data = (struct dra7xx_pcie_of_data *)match->data;
615 mode = (enum dw_pcie_device_mode)data->mode;
47ff3de9
KVA
616
617 dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
618 if (!dra7xx)
619 return -ENOMEM;
620
442ec4c0
KVA
621 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
622 if (!pci)
623 return -ENOMEM;
624
625 pci->dev = dev;
626 pci->ops = &dw_pcie_ops;
627
628 pp = &pci->pp;
150645b9
BH
629 pp->ops = &dra7xx_pcie_host_ops;
630
47ff3de9
KVA
631 irq = platform_get_irq(pdev, 0);
632 if (irq < 0) {
a0d21ba1
GS
633 dev_err(dev, "missing IRQ resource: %d\n", irq);
634 return irq;
47ff3de9
KVA
635 }
636
47ff3de9
KVA
637 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
638 base = devm_ioremap_nocache(dev, res->start, resource_size(res));
639 if (!base)
640 return -ENOMEM;
641
642 phy_count = of_property_count_strings(np, "phy-names");
643 if (phy_count < 0) {
644 dev_err(dev, "unable to find the strings\n");
645 return phy_count;
646 }
647
648 phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
649 if (!phy)
650 return -ENOMEM;
651
652 for (i = 0; i < phy_count; i++) {
653 snprintf(name, sizeof(name), "pcie-phy%d", i);
654 phy[i] = devm_phy_get(dev, name);
655 if (IS_ERR(phy[i]))
656 return PTR_ERR(phy[i]);
47ff3de9
KVA
657 }
658
659 dra7xx->base = base;
660 dra7xx->phy = phy;
442ec4c0 661 dra7xx->pci = pci;
47ff3de9
KVA
662 dra7xx->phy_count = phy_count;
663
1f6c4501
KVA
664 ret = dra7xx_pcie_enable_phy(dra7xx);
665 if (ret) {
666 dev_err(dev, "failed to enable phy\n");
667 return ret;
668 }
669
9bcf0a6f
KVA
670 platform_set_drvdata(pdev, dra7xx);
671
47ff3de9
KVA
672 pm_runtime_enable(dev);
673 ret = pm_runtime_get_sync(dev);
d3f4caa3 674 if (ret < 0) {
47ff3de9 675 dev_err(dev, "pm_runtime_get_sync failed\n");
0e2bdb0e 676 goto err_get_sync;
47ff3de9
KVA
677 }
678
602d38bc
KVA
679 reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
680 if (IS_ERR(reset)) {
681 ret = PTR_ERR(reset);
682 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
78bdcad0 683 goto err_gpio;
47ff3de9
KVA
684 }
685
686 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
687 reg &= ~LTSSM_EN;
688 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
689
ab5fe4f4
KVA
690 dra7xx->link_gen = of_pci_get_max_link_speed(np);
691 if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
692 dra7xx->link_gen = 2;
693
608793e2
KVA
694 switch (mode) {
695 case DW_PCIE_RC_TYPE:
696 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
697 DEVICE_TYPE_RC);
698 ret = dra7xx_add_pcie_port(dra7xx, pdev);
699 if (ret < 0)
700 goto err_gpio;
701 break;
702 case DW_PCIE_EP_TYPE:
703 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
704 DEVICE_TYPE_EP);
f7a2757f
KVA
705
706 ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
707 if (ret)
708 goto err_gpio;
709
608793e2
KVA
710 ret = dra7xx_add_pcie_ep(dra7xx, pdev);
711 if (ret < 0)
712 goto err_gpio;
713 break;
714 default:
715 dev_err(dev, "INVALID device type %d\n", mode);
716 }
717 dra7xx->mode = mode;
47ff3de9 718
d4c7d1a0
K
719 ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
720 IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
721 if (ret) {
722 dev_err(dev, "failed to request irq\n");
723 goto err_gpio;
724 }
725
47ff3de9
KVA
726 return 0;
727
78bdcad0 728err_gpio:
47ff3de9 729 pm_runtime_put(dev);
0e2bdb0e
KVA
730
731err_get_sync:
47ff3de9 732 pm_runtime_disable(dev);
1f6c4501 733 dra7xx_pcie_disable_phy(dra7xx);
47ff3de9
KVA
734
735 return ret;
736}
737
#ifdef CONFIG_PM_SLEEP
/* Suspend (RC mode only): clear Memory Space Enable so MMIO stops */
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

/* Resume (RC mode only): re-enable Memory Space Enable */
static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

/* Late (noirq) suspend: power the PHYs down last */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

/* Early (noirq) resume: bring the PHYs back up first */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif
796
797static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
389c7094 798 SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
e52eb445
KVA
799 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
800 dra7xx_pcie_resume_noirq)
801};
802
47ff3de9 803static struct platform_driver dra7xx_pcie_driver = {
47ff3de9
KVA
804 .driver = {
805 .name = "dra7-pcie",
47ff3de9 806 .of_match_table = of_dra7xx_pcie_match,
d29438d6 807 .suppress_bind_attrs = true,
e52eb445 808 .pm = &dra7xx_pcie_pm_ops,
47ff3de9
KVA
809 },
810};
d29438d6 811builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);