Commit | Line | Data |
---|---|---|
8cfab3cf | 1 | // SPDX-License-Identifier: GPL-2.0 |
47ff3de9 KVA |
2 | /* |
3 | * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs | |
4 | * | |
5 | * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com | |
6 | * | |
7 | * Authors: Kishon Vijay Abraham I <kishon@ti.com> | |
47ff3de9 KVA |
8 | */ |
9 | ||
608793e2 | 10 | #include <linux/delay.h> |
7a4db656 | 11 | #include <linux/device.h> |
47ff3de9 KVA |
12 | #include <linux/err.h> |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/irq.h> | |
15 | #include <linux/irqdomain.h> | |
16 | #include <linux/kernel.h> | |
d29438d6 | 17 | #include <linux/init.h> |
608793e2 | 18 | #include <linux/of_device.h> |
78bdcad0 | 19 | #include <linux/of_gpio.h> |
ab5fe4f4 | 20 | #include <linux/of_pci.h> |
47ff3de9 KVA |
21 | #include <linux/pci.h> |
22 | #include <linux/phy/phy.h> | |
23 | #include <linux/platform_device.h> | |
24 | #include <linux/pm_runtime.h> | |
25 | #include <linux/resource.h> | |
26 | #include <linux/types.h> | |
f7a2757f KVA |
27 | #include <linux/mfd/syscon.h> |
28 | #include <linux/regmap.h> | |
47ff3de9 | 29 | |
6e0832fa | 30 | #include "../../pci.h" |
47ff3de9 KVA |
31 | #include "pcie-designware.h" |
32 | ||
/* PCIe controller wrapper DRA7XX configuration registers */

/* "Main" wrapper interrupt status / enable-set registers and event bits */
#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN	0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
/* Every "main" event the driver enables and later acknowledges */
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

/* MSI / legacy-INTx interrupt status / enable-set registers and bits */
#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI	0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

/* RC/EP operating-mode selection register and values */
#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD		0x0104
#define	LTSSM_EN					0x1	/* start link training */

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
/* Only the low 28 bits of a CPU address reach the PCIe bus */
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

/* Fixed offset of the PCI Express capability in DBI config space */
#define EXP_CAP_ID_OFFSET				0x70

/* EP mode: assert/deassert legacy INTx toward the RC */
#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT		0x0128

/* EP mode: MSI transmit trigger register */
#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

/* Control-module lane-selection bits (x2-capable SoCs) */
#define PCIE_1LANE_2LANE_SELECTION			BIT(13)
#define PCIE_B1C0_MODE_SEL				BIT(2)
#define PCIE_B0_B1_TSYNCEN				BIT(0)
87 | ||
/* Per-controller driver state. */
struct dra7xx_pcie {
	struct dw_pcie		*pci;		/* DesignWare core handle */
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;		/* one PHY per lane */
	int			link_gen;	/* max link speed (1 or 2) */
	struct irq_domain	*irq_domain;	/* legacy INTx domain */
	enum dw_pcie_device_mode mode;		/* RC or EP */
};

/* Per-compatible match data. */
struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
	/* control-module bit(s) selecting B1C0 mode on x2 devices */
	u32 b1co_mode_sel_mask;
};
102 | ||
/* Driver data is stored on the device, keyed by the dw_pcie handle. */
#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

/* Read a register from the ti_conf wrapper region. */
static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

/* Write a register in the ti_conf wrapper region. */
static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}
115 | ||
/*
 * Translate a CPU address to the bus address programmed into the ATU:
 * only the low 28 bits are carried onto the PCIe bus on DRA7xx.
 */
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
120 | ||
/* Report link state from the wrapper's PHY control/status register. */
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}
128 | ||
/* Stop link training by clearing LTSSM_EN in the wrapper command register. */
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}
138 | ||
/*
 * Start link training. If the controller is capped to Gen1 via DT
 * ("max-link-speed" = 1), clamp both the advertised link capability
 * (LNKCAP) and the target speed (LNKCTL2) to 2.5 GT/s first, then set
 * LTSSM_EN. Returns 0; link-up is awaited by the caller.
 */
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		/* Limit advertised speed in the Link Capabilities register */
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		/* Limit target speed in Link Control 2 (16-bit register) */
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	/* Kick off LTSSM link training */
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}
177 | ||
/* Clear then enable MSI and legacy-INTx events in the wrapper. */
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	/* Writing a set bit to IRQSTATUS acknowledges (clears) the event */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}
187 | ||
/* Clear then enable all "main" wrapper error/PM/link events. */
static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}
195 | ||
196 | static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) | |
197 | { | |
198 | dra7xx_pcie_enable_wrapper_interrupts(dra7xx); | |
199 | dra7xx_pcie_enable_msi_interrupts(dra7xx); | |
200 | } | |
201 | ||
4a301766 | 202 | static int dra7xx_pcie_host_init(struct pcie_port *pp) |
47ff3de9 | 203 | { |
442ec4c0 KVA |
204 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
205 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | |
21baa1c4 | 206 | |
7e57fd14 JZ |
207 | dw_pcie_setup_rc(pp); |
208 | ||
608793e2 KVA |
209 | dra7xx_pcie_establish_link(pci); |
210 | dw_pcie_wait_for_link(pci); | |
ebe85a44 | 211 | dw_pcie_msi_init(pp); |
21baa1c4 | 212 | dra7xx_pcie_enable_interrupts(dra7xx); |
4a301766 BA |
213 | |
214 | return 0; | |
47ff3de9 KVA |
215 | } |
216 | ||
/* DesignWare host callbacks for RC mode. */
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};
220 | ||
/* irq_domain map hook: wire each INTx hwirq to a simple handler. */
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	/* standard #interrupt-cells = <1> INTx translation */
	.xlate = pci_irqd_intx_xlate,
};
234 | ||
235 | static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) | |
236 | { | |
442ec4c0 KVA |
237 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
238 | struct device *dev = pci->dev; | |
239 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | |
47ff3de9 KVA |
240 | struct device_node *node = dev->of_node; |
241 | struct device_node *pcie_intc_node = of_get_next_child(node, NULL); | |
242 | ||
243 | if (!pcie_intc_node) { | |
244 | dev_err(dev, "No PCIe Intc node found\n"); | |
991bfef8 | 245 | return -ENODEV; |
47ff3de9 KVA |
246 | } |
247 | ||
61534d1a | 248 | dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, |
ebe85a44 KVA |
249 | &intx_domain_ops, pp); |
250 | if (!dra7xx->irq_domain) { | |
47ff3de9 | 251 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); |
991bfef8 | 252 | return -ENODEV; |
47ff3de9 KVA |
253 | } |
254 | ||
255 | return 0; | |
256 | } | |
257 | ||
/*
 * Handler for the MSI/INTx interrupt line: dispatch MSI events to the
 * DesignWare core and INTA-INTD events through the INTx irq_domain,
 * then acknowledge the handled bits in IRQSTATUS_MSI.
 *
 * NOTE(review): the switch matches the raw status value, so it only
 * dispatches when exactly one of MSI/INTA..INTD is pending; a status
 * with multiple bits set falls through undispatched (though it is
 * still acknowledged below) — confirm against wrapper IRQ semantics.
 */
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	unsigned long reg;
	u32 virq, bit;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		/* INTx hwirqs are bits 0..3 of the status register */
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	/* Write-1-to-clear acknowledge of everything we observed */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}
288 | ||
47ff3de9 KVA |
/*
 * Handler for the "main" wrapper interrupt line: log each error / power
 * management / configuration event, propagate link-up to the endpoint
 * core when running in EP mode, then acknowledge all observed bits.
 */
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		/* Only the EP framework needs explicit link-up notification */
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	/* Write-1-to-clear acknowledge */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}
347 | ||
608793e2 KVA |
/* EP-core init hook: clear all BARs and unmask wrapper interrupts. */
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
359 | ||
/* Pulse legacy INTx toward the RC: assert, hold 1 ms, deassert. */
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}
366 | ||
/*
 * Transmit an MSI from EP mode. The 1-based interrupt number is
 * converted to a 0-based vector in the MSI_XMT register, and
 * MSI_REQ_GRANT triggers the send.
 */
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}
376 | ||
16093362 | 377 | static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, |
d3c70a98 | 378 | enum pci_epc_irq_type type, u16 interrupt_num) |
608793e2 KVA |
379 | { |
380 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
381 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | |
382 | ||
383 | switch (type) { | |
384 | case PCI_EPC_IRQ_LEGACY: | |
385 | dra7xx_pcie_raise_legacy_irq(dra7xx); | |
386 | break; | |
387 | case PCI_EPC_IRQ_MSI: | |
388 | dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); | |
389 | break; | |
390 | default: | |
391 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | |
392 | } | |
393 | ||
394 | return 0; | |
395 | } | |
396 | ||
/* EP capabilities: link-up notifier available, MSI yes, MSI-X no. */
static const struct pci_epc_features dra7xx_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
	return &dra7xx_pcie_epc_features;
}
408 | ||
/* DesignWare endpoint callbacks. */
static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
	.get_features = dra7xx_pcie_get_features,
};
414 | ||
/*
 * EP-mode setup: map the two DBI register spaces and the outbound
 * address space from DT resources, then register with the endpoint core.
 * Returns 0 on success or a negative errno.
 */
static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	/* devm_ioremap_resource() also validates a NULL resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	/* Outbound window for mapping RC memory */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}
452 | ||
e73044a0 JH |
/*
 * RC-mode setup: claim the MSI/INTx interrupt, build the INTx
 * irq_domain, map the RC DBI space and register with the DesignWare
 * host core. Returns 0 on success or a negative errno.
 */
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

	/* Second platform IRQ is the MSI/INTx line */
	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi",	dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
495 | ||
/* DesignWare core callbacks shared by RC and EP modes. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};
502 | ||
1f6c4501 KVA |
503 | static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) |
504 | { | |
505 | int phy_count = dra7xx->phy_count; | |
506 | ||
507 | while (phy_count--) { | |
508 | phy_power_off(dra7xx->phy[phy_count]); | |
509 | phy_exit(dra7xx->phy[phy_count]); | |
510 | } | |
511 | } | |
512 | ||
/*
 * Set mode, init and power on each PHY in order. On any failure the
 * already-enabled PHYs are unwound in reverse before returning the
 * error.
 */
static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			/* phy[i] was init'ed but not powered; exit it here */
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	/* Unwind PHYs 0..i-1 that were fully enabled */
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}
545 | ||
608793e2 KVA |
/* Match data: mode plus (for dra74x/dra72x) the B1C0 lane-select mask. */
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_EP_TYPE,
};
573 | ||
608793e2 KVA |
/* DT compatibles; .data selects RC/EP mode and lane-select mask. */
static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-rc",
		.data = &dra746_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-rc",
		.data = &dra726_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-ep",
		.data = &dra746_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-ep",
		.data = &dra726_pcie_ep_of_data,
	},
	{},
};
601 | ||
/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device whose DT node carries the "ti,syscon-unaligned-access"
 *       phandle (syscon, register offset, bit mask)
 *
 * Access to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 *
 * Returns 0 on success or a negative errno (callers in RC mode treat
 * failure as non-fatal).
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	/* args[0] = register offset, args[1] = bit mask to set */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}
643 | ||
c232c0df KVA |
/*
 * Select x2 (two-lane) operation via the control-module lane-selection
 * register referenced by the "ti,syscon-lane-sel" DT property.
 * @b1co_mode_sel_mask: device-specific B1C0 mode-select bit(s).
 * Returns 0 on success; callers fall back to x1 on error.
 */
static int dra7xx_pcie_configure_two_lane(struct device *dev,
					  u32 b1co_mode_sel_mask)
{
	struct device_node *np = dev->of_node;
	struct regmap *pcie_syscon;
	unsigned int pcie_reg;
	u32 mask;
	u32 val;

	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
	if (IS_ERR(pcie_syscon)) {
		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
		return -EINVAL;
	}

	/* Second cell of the property is the register offset */
	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
				       &pcie_reg)) {
		dev_err(dev, "couldn't get lane selection reg offset\n");
		return -EINVAL;
	}

	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

	return 0;
}
671 | ||
47ff3de9 KVA |
672 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) |
673 | { | |
674 | u32 reg; | |
675 | int ret; | |
676 | int irq; | |
677 | int i; | |
678 | int phy_count; | |
679 | struct phy **phy; | |
7a4db656 | 680 | struct device_link **link; |
47ff3de9 KVA |
681 | void __iomem *base; |
682 | struct resource *res; | |
442ec4c0 | 683 | struct dw_pcie *pci; |
442ec4c0 | 684 | struct dra7xx_pcie *dra7xx; |
47ff3de9 KVA |
685 | struct device *dev = &pdev->dev; |
686 | struct device_node *np = dev->of_node; | |
687 | char name[10]; | |
602d38bc | 688 | struct gpio_desc *reset; |
608793e2 KVA |
689 | const struct of_device_id *match; |
690 | const struct dra7xx_pcie_of_data *data; | |
691 | enum dw_pcie_device_mode mode; | |
c232c0df | 692 | u32 b1co_mode_sel_mask; |
608793e2 KVA |
693 | |
694 | match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); | |
695 | if (!match) | |
696 | return -EINVAL; | |
697 | ||
698 | data = (struct dra7xx_pcie_of_data *)match->data; | |
699 | mode = (enum dw_pcie_device_mode)data->mode; | |
c232c0df | 700 | b1co_mode_sel_mask = data->b1co_mode_sel_mask; |
47ff3de9 KVA |
701 | |
702 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); | |
703 | if (!dra7xx) | |
704 | return -ENOMEM; | |
705 | ||
442ec4c0 KVA |
706 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); |
707 | if (!pci) | |
708 | return -ENOMEM; | |
709 | ||
710 | pci->dev = dev; | |
711 | pci->ops = &dw_pcie_ops; | |
712 | ||
47ff3de9 KVA |
713 | irq = platform_get_irq(pdev, 0); |
714 | if (irq < 0) { | |
a0d21ba1 GS |
715 | dev_err(dev, "missing IRQ resource: %d\n", irq); |
716 | return irq; | |
47ff3de9 KVA |
717 | } |
718 | ||
47ff3de9 KVA |
719 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); |
720 | base = devm_ioremap_nocache(dev, res->start, resource_size(res)); | |
721 | if (!base) | |
722 | return -ENOMEM; | |
723 | ||
724 | phy_count = of_property_count_strings(np, "phy-names"); | |
725 | if (phy_count < 0) { | |
726 | dev_err(dev, "unable to find the strings\n"); | |
727 | return phy_count; | |
728 | } | |
729 | ||
a86854d0 | 730 | phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); |
47ff3de9 KVA |
731 | if (!phy) |
732 | return -ENOMEM; | |
733 | ||
a86854d0 | 734 | link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); |
7a4db656 KVA |
735 | if (!link) |
736 | return -ENOMEM; | |
737 | ||
47ff3de9 KVA |
738 | for (i = 0; i < phy_count; i++) { |
739 | snprintf(name, sizeof(name), "pcie-phy%d", i); | |
740 | phy[i] = devm_phy_get(dev, name); | |
741 | if (IS_ERR(phy[i])) | |
742 | return PTR_ERR(phy[i]); | |
7a4db656 KVA |
743 | |
744 | link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); | |
745 | if (!link[i]) { | |
746 | ret = -EINVAL; | |
747 | goto err_link; | |
748 | } | |
47ff3de9 KVA |
749 | } |
750 | ||
751 | dra7xx->base = base; | |
752 | dra7xx->phy = phy; | |
442ec4c0 | 753 | dra7xx->pci = pci; |
47ff3de9 KVA |
754 | dra7xx->phy_count = phy_count; |
755 | ||
c232c0df KVA |
756 | if (phy_count == 2) { |
757 | ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask); | |
758 | if (ret < 0) | |
759 | dra7xx->phy_count = 1; /* Fallback to x1 lane mode */ | |
760 | } | |
761 | ||
1f6c4501 KVA |
762 | ret = dra7xx_pcie_enable_phy(dra7xx); |
763 | if (ret) { | |
764 | dev_err(dev, "failed to enable phy\n"); | |
765 | return ret; | |
766 | } | |
767 | ||
9bcf0a6f KVA |
768 | platform_set_drvdata(pdev, dra7xx); |
769 | ||
47ff3de9 KVA |
770 | pm_runtime_enable(dev); |
771 | ret = pm_runtime_get_sync(dev); | |
d3f4caa3 | 772 | if (ret < 0) { |
47ff3de9 | 773 | dev_err(dev, "pm_runtime_get_sync failed\n"); |
0e2bdb0e | 774 | goto err_get_sync; |
47ff3de9 KVA |
775 | } |
776 | ||
602d38bc KVA |
777 | reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); |
778 | if (IS_ERR(reset)) { | |
779 | ret = PTR_ERR(reset); | |
780 | dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); | |
78bdcad0 | 781 | goto err_gpio; |
47ff3de9 KVA |
782 | } |
783 | ||
784 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | |
785 | reg &= ~LTSSM_EN; | |
786 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | |
787 | ||
ab5fe4f4 KVA |
788 | dra7xx->link_gen = of_pci_get_max_link_speed(np); |
789 | if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) | |
790 | dra7xx->link_gen = 2; | |
791 | ||
608793e2 KVA |
792 | switch (mode) { |
793 | case DW_PCIE_RC_TYPE: | |
f1aba0a0 NC |
794 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { |
795 | ret = -ENODEV; | |
796 | goto err_gpio; | |
797 | } | |
798 | ||
608793e2 KVA |
799 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, |
800 | DEVICE_TYPE_RC); | |
726d75a6 V |
801 | |
802 | ret = dra7xx_pcie_unaligned_memaccess(dev); | |
803 | if (ret) | |
804 | dev_err(dev, "WA for Errata i870 not applied\n"); | |
805 | ||
608793e2 KVA |
806 | ret = dra7xx_add_pcie_port(dra7xx, pdev); |
807 | if (ret < 0) | |
808 | goto err_gpio; | |
809 | break; | |
810 | case DW_PCIE_EP_TYPE: | |
f1aba0a0 NC |
811 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { |
812 | ret = -ENODEV; | |
813 | goto err_gpio; | |
814 | } | |
815 | ||
608793e2 KVA |
816 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, |
817 | DEVICE_TYPE_EP); | |
f7a2757f | 818 | |
726d75a6 | 819 | ret = dra7xx_pcie_unaligned_memaccess(dev); |
f7a2757f KVA |
820 | if (ret) |
821 | goto err_gpio; | |
822 | ||
608793e2 KVA |
823 | ret = dra7xx_add_pcie_ep(dra7xx, pdev); |
824 | if (ret < 0) | |
825 | goto err_gpio; | |
826 | break; | |
827 | default: | |
828 | dev_err(dev, "INVALID device type %d\n", mode); | |
829 | } | |
830 | dra7xx->mode = mode; | |
47ff3de9 | 831 | |
d4c7d1a0 K |
832 | ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, |
833 | IRQF_SHARED, "dra7xx-pcie-main", dra7xx); | |
834 | if (ret) { | |
835 | dev_err(dev, "failed to request irq\n"); | |
836 | goto err_gpio; | |
837 | } | |
838 | ||
47ff3de9 KVA |
839 | return 0; |
840 | ||
78bdcad0 | 841 | err_gpio: |
47ff3de9 | 842 | pm_runtime_put(dev); |
0e2bdb0e KVA |
843 | |
844 | err_get_sync: | |
47ff3de9 | 845 | pm_runtime_disable(dev); |
1f6c4501 | 846 | dra7xx_pcie_disable_phy(dra7xx); |
47ff3de9 | 847 | |
7a4db656 KVA |
848 | err_link: |
849 | while (--i >= 0) | |
850 | device_link_del(link[i]); | |
851 | ||
47ff3de9 KVA |
852 | return ret; |
853 | } | |
854 | ||
#ifdef CONFIG_PM_SLEEP
/* Suspend (RC mode only): clear Memory Space Enable in PCI_COMMAND. */
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

/* Resume (RC mode only): restore Memory Space Enable. */
static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

/* Late (noirq) suspend: power down the PHYs. */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

/* Early (noirq) resume: power the PHYs back up. */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif
913 | ||
/* Shutdown: stop the link, release runtime PM, power down the PHYs. */
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}
929 | ||
/* System sleep ops; the noirq pair handles PHY power sequencing. */
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};
935 | ||
/* Built-in driver: no remove path, so binding via sysfs is suppressed. */
static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);