Commit | Line | Data |
---|---|---|
8cfab3cf | 1 | // SPDX-License-Identifier: GPL-2.0 |
47ff3de9 KVA |
2 | /* |
3 | * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs | |
4 | * | |
5 | * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com | |
6 | * | |
7 | * Authors: Kishon Vijay Abraham I <kishon@ti.com> | |
47ff3de9 KVA |
8 | */ |
9 | ||
608793e2 | 10 | #include <linux/delay.h> |
7a4db656 | 11 | #include <linux/device.h> |
47ff3de9 KVA |
12 | #include <linux/err.h> |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/irq.h> | |
15 | #include <linux/irqdomain.h> | |
16 | #include <linux/kernel.h> | |
d29438d6 | 17 | #include <linux/init.h> |
608793e2 | 18 | #include <linux/of_device.h> |
78bdcad0 | 19 | #include <linux/of_gpio.h> |
ab5fe4f4 | 20 | #include <linux/of_pci.h> |
47ff3de9 KVA |
21 | #include <linux/pci.h> |
22 | #include <linux/phy/phy.h> | |
23 | #include <linux/platform_device.h> | |
24 | #include <linux/pm_runtime.h> | |
25 | #include <linux/resource.h> | |
26 | #include <linux/types.h> | |
f7a2757f KVA |
27 | #include <linux/mfd/syscon.h> |
28 | #include <linux/regmap.h> | |
47ff3de9 | 29 | |
9e2aee80 | 30 | #include "../pci.h" |
47ff3de9 KVA |
31 | #include "pcie-designware.h" |
32 | ||
/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN	0x0028
/* Bits shared by IRQSTATUS_MAIN (write-1-to-clear) and IRQENABLE_SET_MAIN */
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
/* All "main" wrapper events the driver cares about */
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
/* Bits in IRQSTATUS_MSI / IRQENABLE_SET_MSI: legacy INTA-INTD plus MSI */
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

/* Selects RC vs EP operation of the DesignWare core */
#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
/* Outbound window decodes only the low 28 bits of the CPU address */
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

/* Fixed offset of the PCI Express capability in config space */
#define	EXP_CAP_ID_OFFSET				0x70

/* EP mode: pulse these to raise/clear a legacy INTx toward the host */
#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

/* EP mode: MSI transmit request register */
#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define	MSI_REQ_GRANT					BIT(0)
#define	MSI_VECTOR_SHIFT				7
/* Per-device state for one DRA7xx PCIe controller instance */
struct dra7xx_pcie {
	struct dw_pcie		*pci;		/* DesignWare core handle */
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;		/* one PHY per lane */
	int			link_gen;	/* capped link speed (1 or 2) */
	struct irq_domain	*irq_domain;	/* legacy INTx demux domain */
	enum dw_pcie_device_mode mode;		/* RC or EP, from match data */
};
93 | ||
/* Per-compatible match data: selects host (RC) or endpoint (EP) mode */
struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

/* The dw_pcie's device drvdata is the enclosing dra7xx_pcie */
#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)
47ff3de9 KVA |
99 | |
100 | static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) | |
101 | { | |
102 | return readl(pcie->base + offset); | |
103 | } | |
104 | ||
105 | static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, | |
106 | u32 value) | |
107 | { | |
108 | writel(value, pcie->base + offset); | |
109 | } | |
110 | ||
b6900aeb | 111 | static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) |
2ed6cc71 KVA |
112 | { |
113 | return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; | |
114 | } | |
115 | ||
442ec4c0 | 116 | static int dra7xx_pcie_link_up(struct dw_pcie *pci) |
47ff3de9 | 117 | { |
442ec4c0 | 118 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); |
47ff3de9 KVA |
119 | u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); |
120 | ||
121 | return !!(reg & LINK_UP); | |
122 | } | |
123 | ||
608793e2 | 124 | static void dra7xx_pcie_stop_link(struct dw_pcie *pci) |
47ff3de9 | 125 | { |
608793e2 KVA |
126 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); |
127 | u32 reg; | |
128 | ||
129 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | |
130 | reg &= ~LTSSM_EN; | |
131 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | |
132 | } | |
133 | ||
/*
 * Start link training. If the DT limits the port to gen1, first cap both
 * the advertised link capability (LNKCAP) and the target link speed
 * (LNKCTL2) at 2.5 GT/s, then set LTSSM_EN to let training run.
 * Returns 0 (also when the link is already up).
 */
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		/* Cap the advertised supported link speed at 2.5 GT/s. */
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		/*
		 * Likewise force the target link speed in LNKCTL2 to gen1
		 * (the TLS field shares the SLS bit positions, so the
		 * LNKCAP mask/value are reused here).
		 */
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	/* Enable the link-training state machine. */
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}
172 | ||
5ffd90a0 | 173 | static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) |
47ff3de9 | 174 | { |
47ff3de9 | 175 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, |
40aa52c4 | 176 | LEG_EP_INTERRUPTS | MSI); |
5ffd90a0 KVA |
177 | |
178 | dra7xx_pcie_writel(dra7xx, | |
179 | PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, | |
ebe85a44 | 180 | MSI | LEG_EP_INTERRUPTS); |
47ff3de9 KVA |
181 | } |
182 | ||
5ffd90a0 KVA |
183 | static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) |
184 | { | |
185 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, | |
40aa52c4 | 186 | INTERRUPTS); |
5ffd90a0 KVA |
187 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, |
188 | INTERRUPTS); | |
189 | } | |
190 | ||
/* Enable both interrupt groups: wrapper (main) errors/events and MSI/INTx. */
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
196 | ||
4a301766 | 197 | static int dra7xx_pcie_host_init(struct pcie_port *pp) |
47ff3de9 | 198 | { |
442ec4c0 KVA |
199 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
200 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | |
21baa1c4 | 201 | |
7e57fd14 JZ |
202 | dw_pcie_setup_rc(pp); |
203 | ||
608793e2 KVA |
204 | dra7xx_pcie_establish_link(pci); |
205 | dw_pcie_wait_for_link(pci); | |
ebe85a44 | 206 | dw_pcie_msi_init(pp); |
21baa1c4 | 207 | dra7xx_pcie_enable_interrupts(dra7xx); |
4a301766 BA |
208 | |
209 | return 0; | |
47ff3de9 KVA |
210 | } |
211 | ||
4ab2e7c0 | 212 | static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { |
47ff3de9 KVA |
213 | .host_init = dra7xx_pcie_host_init, |
214 | }; | |
215 | ||
216 | static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, | |
217 | irq_hw_number_t hwirq) | |
218 | { | |
219 | irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); | |
220 | irq_set_chip_data(irq, domain->host_data); | |
47ff3de9 KVA |
221 | |
222 | return 0; | |
223 | } | |
224 | ||
/* Linear INTx domain: standard PCI INTx translation plus our .map above */
static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};
229 | ||
230 | static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) | |
231 | { | |
442ec4c0 KVA |
232 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
233 | struct device *dev = pci->dev; | |
234 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | |
47ff3de9 KVA |
235 | struct device_node *node = dev->of_node; |
236 | struct device_node *pcie_intc_node = of_get_next_child(node, NULL); | |
237 | ||
238 | if (!pcie_intc_node) { | |
239 | dev_err(dev, "No PCIe Intc node found\n"); | |
991bfef8 | 240 | return -ENODEV; |
47ff3de9 KVA |
241 | } |
242 | ||
61534d1a | 243 | dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, |
ebe85a44 KVA |
244 | &intx_domain_ops, pp); |
245 | if (!dra7xx->irq_domain) { | |
47ff3de9 | 246 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); |
991bfef8 | 247 | return -ENODEV; |
47ff3de9 KVA |
248 | } |
249 | ||
250 | return 0; | |
251 | } | |
252 | ||
/* Handler for the MSI/INTx interrupt line (platform IRQ 1). */
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	unsigned long reg;
	u32 virq, bit;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	/*
	 * NOTE(review): the switch compares the *whole* status register, so
	 * a case only matches when exactly one bit is set; a status word
	 * with MSI and an INTx bit set simultaneously matches no case and
	 * is acked below without being serviced -- confirm the hardware
	 * re-raises the line in that situation.
	 */
	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		/* Demultiplex the INTx bit to its mapped virtual IRQ. */
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	/* IRQSTATUS_MSI is write-1-to-clear: ack what we read. */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}
283 | ||
47ff3de9 KVA |
/*
 * Handler for the "main" wrapper interrupt line (platform IRQ 0).
 * Mostly logs error/PM events at debug level; the one functional event
 * is LINK_UP_EVT, which in EP mode notifies the endpoint core that the
 * link came up.
 */
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		/* Only EP mode has anyone to notify about link-up. */
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	/* IRQSTATUS_MAIN is write-1-to-clear: ack everything we saw. */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}
342 | ||
608793e2 KVA |
343 | static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) |
344 | { | |
345 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
346 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | |
85aa1399 KVA |
347 | enum pci_barno bar; |
348 | ||
349 | for (bar = BAR_0; bar <= BAR_5; bar++) | |
350 | dw_pcie_ep_reset_bar(pci, bar); | |
608793e2 KVA |
351 | |
352 | dra7xx_pcie_enable_wrapper_interrupts(dra7xx); | |
353 | } | |
354 | ||
/* Pulse a legacy INTx toward the host: assert, hold 1 ms, deassert. */
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	/* Give the host a window to observe the assertion before clearing. */
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}
361 | ||
362 | static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, | |
363 | u8 interrupt_num) | |
364 | { | |
365 | u32 reg; | |
366 | ||
367 | reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; | |
368 | reg |= MSI_REQ_GRANT; | |
369 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); | |
370 | } | |
371 | ||
16093362 | 372 | static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, |
608793e2 KVA |
373 | enum pci_epc_irq_type type, u8 interrupt_num) |
374 | { | |
375 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
376 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | |
377 | ||
378 | switch (type) { | |
379 | case PCI_EPC_IRQ_LEGACY: | |
380 | dra7xx_pcie_raise_legacy_irq(dra7xx); | |
381 | break; | |
382 | case PCI_EPC_IRQ_MSI: | |
383 | dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); | |
384 | break; | |
385 | default: | |
386 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | |
387 | } | |
388 | ||
389 | return 0; | |
390 | } | |
391 | ||
/* Endpoint-mode callbacks handed to the DesignWare EP core */
static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};
396 | ||
397 | static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, | |
398 | struct platform_device *pdev) | |
399 | { | |
400 | int ret; | |
401 | struct dw_pcie_ep *ep; | |
402 | struct resource *res; | |
403 | struct device *dev = &pdev->dev; | |
404 | struct dw_pcie *pci = dra7xx->pci; | |
405 | ||
406 | ep = &pci->ep; | |
407 | ep->ops = &pcie_ep_ops; | |
408 | ||
409 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); | |
410 | pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res)); | |
411 | if (!pci->dbi_base) | |
412 | return -ENOMEM; | |
413 | ||
414 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); | |
415 | pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res)); | |
416 | if (!pci->dbi_base2) | |
417 | return -ENOMEM; | |
418 | ||
419 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); | |
420 | if (!res) | |
421 | return -EINVAL; | |
422 | ||
423 | ep->phys_base = res->start; | |
424 | ep->addr_size = resource_size(res); | |
425 | ||
426 | ret = dw_pcie_ep_init(ep); | |
427 | if (ret) { | |
428 | dev_err(dev, "failed to initialize endpoint\n"); | |
429 | return ret; | |
430 | } | |
431 | ||
432 | return 0; | |
433 | } | |
434 | ||
e73044a0 JH |
435 | static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, |
436 | struct platform_device *pdev) | |
47ff3de9 KVA |
437 | { |
438 | int ret; | |
442ec4c0 KVA |
439 | struct dw_pcie *pci = dra7xx->pci; |
440 | struct pcie_port *pp = &pci->pp; | |
441 | struct device *dev = pci->dev; | |
47ff3de9 | 442 | struct resource *res; |
47ff3de9 KVA |
443 | |
444 | pp->irq = platform_get_irq(pdev, 1); | |
445 | if (pp->irq < 0) { | |
446 | dev_err(dev, "missing IRQ resource\n"); | |
2f3ec752 | 447 | return pp->irq; |
47ff3de9 KVA |
448 | } |
449 | ||
c7f8146b | 450 | ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, |
8ff0ef99 | 451 | IRQF_SHARED | IRQF_NO_THREAD, |
21baa1c4 | 452 | "dra7-pcie-msi", dra7xx); |
47ff3de9 | 453 | if (ret) { |
c7f8146b | 454 | dev_err(dev, "failed to request irq\n"); |
47ff3de9 KVA |
455 | return ret; |
456 | } | |
457 | ||
ebe85a44 KVA |
458 | ret = dra7xx_pcie_init_irq_domain(pp); |
459 | if (ret < 0) | |
460 | return ret; | |
47ff3de9 KVA |
461 | |
462 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); | |
442ec4c0 KVA |
463 | pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res)); |
464 | if (!pci->dbi_base) | |
47ff3de9 KVA |
465 | return -ENOMEM; |
466 | ||
71890ea0 NC |
467 | pp->ops = &dra7xx_pcie_host_ops; |
468 | ||
47ff3de9 KVA |
469 | ret = dw_pcie_host_init(pp); |
470 | if (ret) { | |
c7f8146b | 471 | dev_err(dev, "failed to initialize host\n"); |
47ff3de9 KVA |
472 | return ret; |
473 | } | |
474 | ||
475 | return 0; | |
476 | } | |
477 | ||
442ec4c0 | 478 | static const struct dw_pcie_ops dw_pcie_ops = { |
2ed6cc71 | 479 | .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, |
608793e2 KVA |
480 | .start_link = dra7xx_pcie_establish_link, |
481 | .stop_link = dra7xx_pcie_stop_link, | |
442ec4c0 KVA |
482 | .link_up = dra7xx_pcie_link_up, |
483 | }; | |
484 | ||
1f6c4501 KVA |
485 | static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) |
486 | { | |
487 | int phy_count = dra7xx->phy_count; | |
488 | ||
489 | while (phy_count--) { | |
490 | phy_power_off(dra7xx->phy[phy_count]); | |
491 | phy_exit(dra7xx->phy[phy_count]); | |
492 | } | |
493 | } | |
494 | ||
495 | static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) | |
496 | { | |
497 | int phy_count = dra7xx->phy_count; | |
498 | int ret; | |
499 | int i; | |
500 | ||
501 | for (i = 0; i < phy_count; i++) { | |
502 | ret = phy_init(dra7xx->phy[i]); | |
503 | if (ret < 0) | |
504 | goto err_phy; | |
505 | ||
506 | ret = phy_power_on(dra7xx->phy[i]); | |
507 | if (ret < 0) { | |
508 | phy_exit(dra7xx->phy[i]); | |
509 | goto err_phy; | |
510 | } | |
511 | } | |
512 | ||
513 | return 0; | |
514 | ||
515 | err_phy: | |
516 | while (--i >= 0) { | |
517 | phy_power_off(dra7xx->phy[i]); | |
518 | phy_exit(dra7xx->phy[i]); | |
519 | } | |
520 | ||
521 | return ret; | |
522 | } | |
523 | ||
608793e2 KVA |
524 | static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { |
525 | .mode = DW_PCIE_RC_TYPE, | |
526 | }; | |
527 | ||
528 | static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { | |
529 | .mode = DW_PCIE_EP_TYPE, | |
530 | }; | |
531 | ||
532 | static const struct of_device_id of_dra7xx_pcie_match[] = { | |
533 | { | |
534 | .compatible = "ti,dra7-pcie", | |
535 | .data = &dra7xx_pcie_rc_of_data, | |
536 | }, | |
537 | { | |
538 | .compatible = "ti,dra7-pcie-ep", | |
539 | .data = &dra7xx_pcie_ep_of_data, | |
540 | }, | |
541 | {}, | |
542 | }; | |
543 | ||
f7a2757f KVA |
544 | /* |
545 | * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 | |
546 | * @dra7xx: the dra7xx device where the workaround should be applied | |
547 | * | |
548 | * Access to the PCIe slave port that are not 32-bit aligned will result | |
549 | * in incorrect mapping to TLP Address and Byte enable fields. Therefore, | |
550 | * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or | |
551 | * 0x3. | |
552 | * | |
553 | * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. | |
554 | */ | |
555 | static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) | |
556 | { | |
557 | int ret; | |
558 | struct device_node *np = dev->of_node; | |
559 | struct of_phandle_args args; | |
560 | struct regmap *regmap; | |
561 | ||
562 | regmap = syscon_regmap_lookup_by_phandle(np, | |
563 | "ti,syscon-unaligned-access"); | |
564 | if (IS_ERR(regmap)) { | |
565 | dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); | |
566 | return -EINVAL; | |
567 | } | |
568 | ||
569 | ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", | |
570 | 2, 0, &args); | |
571 | if (ret) { | |
572 | dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); | |
573 | return ret; | |
574 | } | |
575 | ||
576 | ret = regmap_update_bits(regmap, args.args[0], args.args[1], | |
577 | args.args[1]); | |
578 | if (ret) | |
579 | dev_err(dev, "failed to enable unaligned access\n"); | |
580 | ||
581 | of_node_put(args.np); | |
582 | ||
583 | return ret; | |
584 | } | |
585 | ||
47ff3de9 KVA |
586 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) |
587 | { | |
588 | u32 reg; | |
589 | int ret; | |
590 | int irq; | |
591 | int i; | |
592 | int phy_count; | |
593 | struct phy **phy; | |
7a4db656 | 594 | struct device_link **link; |
47ff3de9 KVA |
595 | void __iomem *base; |
596 | struct resource *res; | |
442ec4c0 | 597 | struct dw_pcie *pci; |
442ec4c0 | 598 | struct dra7xx_pcie *dra7xx; |
47ff3de9 KVA |
599 | struct device *dev = &pdev->dev; |
600 | struct device_node *np = dev->of_node; | |
601 | char name[10]; | |
602d38bc | 602 | struct gpio_desc *reset; |
608793e2 KVA |
603 | const struct of_device_id *match; |
604 | const struct dra7xx_pcie_of_data *data; | |
605 | enum dw_pcie_device_mode mode; | |
606 | ||
607 | match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); | |
608 | if (!match) | |
609 | return -EINVAL; | |
610 | ||
611 | data = (struct dra7xx_pcie_of_data *)match->data; | |
612 | mode = (enum dw_pcie_device_mode)data->mode; | |
47ff3de9 KVA |
613 | |
614 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); | |
615 | if (!dra7xx) | |
616 | return -ENOMEM; | |
617 | ||
442ec4c0 KVA |
618 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); |
619 | if (!pci) | |
620 | return -ENOMEM; | |
621 | ||
622 | pci->dev = dev; | |
623 | pci->ops = &dw_pcie_ops; | |
624 | ||
47ff3de9 KVA |
625 | irq = platform_get_irq(pdev, 0); |
626 | if (irq < 0) { | |
a0d21ba1 GS |
627 | dev_err(dev, "missing IRQ resource: %d\n", irq); |
628 | return irq; | |
47ff3de9 KVA |
629 | } |
630 | ||
47ff3de9 KVA |
631 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); |
632 | base = devm_ioremap_nocache(dev, res->start, resource_size(res)); | |
633 | if (!base) | |
634 | return -ENOMEM; | |
635 | ||
636 | phy_count = of_property_count_strings(np, "phy-names"); | |
637 | if (phy_count < 0) { | |
638 | dev_err(dev, "unable to find the strings\n"); | |
639 | return phy_count; | |
640 | } | |
641 | ||
642 | phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); | |
643 | if (!phy) | |
644 | return -ENOMEM; | |
645 | ||
7a4db656 KVA |
646 | link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); |
647 | if (!link) | |
648 | return -ENOMEM; | |
649 | ||
47ff3de9 KVA |
650 | for (i = 0; i < phy_count; i++) { |
651 | snprintf(name, sizeof(name), "pcie-phy%d", i); | |
652 | phy[i] = devm_phy_get(dev, name); | |
653 | if (IS_ERR(phy[i])) | |
654 | return PTR_ERR(phy[i]); | |
7a4db656 KVA |
655 | |
656 | link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); | |
657 | if (!link[i]) { | |
658 | ret = -EINVAL; | |
659 | goto err_link; | |
660 | } | |
47ff3de9 KVA |
661 | } |
662 | ||
663 | dra7xx->base = base; | |
664 | dra7xx->phy = phy; | |
442ec4c0 | 665 | dra7xx->pci = pci; |
47ff3de9 KVA |
666 | dra7xx->phy_count = phy_count; |
667 | ||
1f6c4501 KVA |
668 | ret = dra7xx_pcie_enable_phy(dra7xx); |
669 | if (ret) { | |
670 | dev_err(dev, "failed to enable phy\n"); | |
671 | return ret; | |
672 | } | |
673 | ||
9bcf0a6f KVA |
674 | platform_set_drvdata(pdev, dra7xx); |
675 | ||
47ff3de9 KVA |
676 | pm_runtime_enable(dev); |
677 | ret = pm_runtime_get_sync(dev); | |
d3f4caa3 | 678 | if (ret < 0) { |
47ff3de9 | 679 | dev_err(dev, "pm_runtime_get_sync failed\n"); |
0e2bdb0e | 680 | goto err_get_sync; |
47ff3de9 KVA |
681 | } |
682 | ||
602d38bc KVA |
683 | reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); |
684 | if (IS_ERR(reset)) { | |
685 | ret = PTR_ERR(reset); | |
686 | dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); | |
78bdcad0 | 687 | goto err_gpio; |
47ff3de9 KVA |
688 | } |
689 | ||
690 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | |
691 | reg &= ~LTSSM_EN; | |
692 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | |
693 | ||
ab5fe4f4 KVA |
694 | dra7xx->link_gen = of_pci_get_max_link_speed(np); |
695 | if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) | |
696 | dra7xx->link_gen = 2; | |
697 | ||
608793e2 KVA |
698 | switch (mode) { |
699 | case DW_PCIE_RC_TYPE: | |
f1aba0a0 NC |
700 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { |
701 | ret = -ENODEV; | |
702 | goto err_gpio; | |
703 | } | |
704 | ||
608793e2 KVA |
705 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, |
706 | DEVICE_TYPE_RC); | |
707 | ret = dra7xx_add_pcie_port(dra7xx, pdev); | |
708 | if (ret < 0) | |
709 | goto err_gpio; | |
710 | break; | |
711 | case DW_PCIE_EP_TYPE: | |
f1aba0a0 NC |
712 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { |
713 | ret = -ENODEV; | |
714 | goto err_gpio; | |
715 | } | |
716 | ||
608793e2 KVA |
717 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, |
718 | DEVICE_TYPE_EP); | |
f7a2757f KVA |
719 | |
720 | ret = dra7xx_pcie_ep_unaligned_memaccess(dev); | |
721 | if (ret) | |
722 | goto err_gpio; | |
723 | ||
608793e2 KVA |
724 | ret = dra7xx_add_pcie_ep(dra7xx, pdev); |
725 | if (ret < 0) | |
726 | goto err_gpio; | |
727 | break; | |
728 | default: | |
729 | dev_err(dev, "INVALID device type %d\n", mode); | |
730 | } | |
731 | dra7xx->mode = mode; | |
47ff3de9 | 732 | |
d4c7d1a0 K |
733 | ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, |
734 | IRQF_SHARED, "dra7xx-pcie-main", dra7xx); | |
735 | if (ret) { | |
736 | dev_err(dev, "failed to request irq\n"); | |
737 | goto err_gpio; | |
738 | } | |
739 | ||
47ff3de9 KVA |
740 | return 0; |
741 | ||
78bdcad0 | 742 | err_gpio: |
47ff3de9 | 743 | pm_runtime_put(dev); |
0e2bdb0e KVA |
744 | |
745 | err_get_sync: | |
47ff3de9 | 746 | pm_runtime_disable(dev); |
1f6c4501 | 747 | dra7xx_pcie_disable_phy(dra7xx); |
47ff3de9 | 748 | |
7a4db656 KVA |
749 | err_link: |
750 | while (--i >= 0) | |
751 | device_link_del(link[i]); | |
752 | ||
47ff3de9 KVA |
753 | return ret; |
754 | } | |
755 | ||
e52eb445 | 756 | #ifdef CONFIG_PM_SLEEP |
389c7094 KVA |
757 | static int dra7xx_pcie_suspend(struct device *dev) |
758 | { | |
759 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | |
442ec4c0 | 760 | struct dw_pcie *pci = dra7xx->pci; |
389c7094 KVA |
761 | u32 val; |
762 | ||
608793e2 KVA |
763 | if (dra7xx->mode != DW_PCIE_RC_TYPE) |
764 | return 0; | |
765 | ||
389c7094 | 766 | /* clear MSE */ |
442ec4c0 | 767 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); |
389c7094 | 768 | val &= ~PCI_COMMAND_MEMORY; |
442ec4c0 | 769 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); |
389c7094 KVA |
770 | |
771 | return 0; | |
772 | } | |
773 | ||
774 | static int dra7xx_pcie_resume(struct device *dev) | |
775 | { | |
776 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | |
442ec4c0 | 777 | struct dw_pcie *pci = dra7xx->pci; |
389c7094 KVA |
778 | u32 val; |
779 | ||
608793e2 KVA |
780 | if (dra7xx->mode != DW_PCIE_RC_TYPE) |
781 | return 0; | |
782 | ||
389c7094 | 783 | /* set MSE */ |
442ec4c0 | 784 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); |
389c7094 | 785 | val |= PCI_COMMAND_MEMORY; |
442ec4c0 | 786 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); |
389c7094 KVA |
787 | |
788 | return 0; | |
789 | } | |
790 | ||
e52eb445 KVA |
/* Late (noirq) suspend stage: power the PHYs down last. */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *pcie = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(pcie);

	return 0;
}
799 | ||
/* Early (noirq) resume stage: power the PHYs back up first. */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *pcie = dev_get_drvdata(dev);
	int ret = dra7xx_pcie_enable_phy(pcie);

	if (ret)
		dev_err(dev, "failed to enable phy\n");

	return ret;
}
813 | #endif | |
814 | ||
4751fac7 | 815 | static void dra7xx_pcie_shutdown(struct platform_device *pdev) |
9c049bea K |
816 | { |
817 | struct device *dev = &pdev->dev; | |
818 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | |
819 | int ret; | |
820 | ||
821 | dra7xx_pcie_stop_link(dra7xx->pci); | |
822 | ||
823 | ret = pm_runtime_put_sync(dev); | |
824 | if (ret < 0) | |
825 | dev_dbg(dev, "pm_runtime_put_sync failed\n"); | |
826 | ||
827 | pm_runtime_disable(dev); | |
828 | dra7xx_pcie_disable_phy(dra7xx); | |
829 | } | |
830 | ||
e52eb445 | 831 | static const struct dev_pm_ops dra7xx_pcie_pm_ops = { |
389c7094 | 832 | SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) |
e52eb445 KVA |
833 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, |
834 | dra7xx_pcie_resume_noirq) | |
835 | }; | |
836 | ||
47ff3de9 | 837 | static struct platform_driver dra7xx_pcie_driver = { |
47ff3de9 KVA |
838 | .driver = { |
839 | .name = "dra7-pcie", | |
47ff3de9 | 840 | .of_match_table = of_dra7xx_pcie_match, |
d29438d6 | 841 | .suppress_bind_attrs = true, |
e52eb445 | 842 | .pm = &dra7xx_pcie_pm_ops, |
47ff3de9 | 843 | }, |
9c049bea | 844 | .shutdown = dra7xx_pcie_shutdown, |
47ff3de9 | 845 | }; |
d29438d6 | 846 | builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); |