Commit | Line | Data |
---|---|---|
8cfab3cf | 1 | // SPDX-License-Identifier: GPL-2.0 |
0c4ffcfe MK |
2 | /* |
3 | * PCIe host controller driver for Texas Instruments Keystone SoCs | |
4 | * | |
5 | * Copyright (C) 2013-2014 Texas Instruments., Ltd. | |
6 | * http://www.ti.com | |
7 | * | |
8 | * Author: Murali Karicheri <m-karicheri2@ti.com> | |
9 | * Implementation based on pci-exynos.c and pcie-designware.c | |
0c4ffcfe MK |
10 | */ |
11 | ||
0c4ffcfe MK |
12 | #include <linux/clk.h> |
13 | #include <linux/delay.h> | |
c0b85586 | 14 | #include <linux/init.h> |
025dd3da | 15 | #include <linux/interrupt.h> |
c0b85586 | 16 | #include <linux/irqchip/chained_irq.h> |
0c4ffcfe | 17 | #include <linux/irqdomain.h> |
b51a625b | 18 | #include <linux/mfd/syscon.h> |
0c4ffcfe | 19 | #include <linux/msi.h> |
0c4ffcfe | 20 | #include <linux/of.h> |
c0b85586 | 21 | #include <linux/of_irq.h> |
0c4ffcfe | 22 | #include <linux/of_pci.h> |
0c4ffcfe | 23 | #include <linux/phy/phy.h> |
c0b85586 | 24 | #include <linux/platform_device.h> |
b51a625b | 25 | #include <linux/regmap.h> |
0c4ffcfe MK |
26 | #include <linux/resource.h> |
27 | #include <linux/signal.h> | |
28 | ||
29 | #include "pcie-designware.h" | |
0c4ffcfe | 30 | |
/*
 * Layout of the "ti,syscon-pcie-id" register: vendor ID in the low
 * half-word, device ID in the high half-word.
 */
#define PCIE_VENDORID_MASK	0xffff
#define PCIE_DEVICEID_SHIFT	16

/* Application registers */
#define CMD_STATUS			0x004
#define LTSSM_EN_VAL		        BIT(0)	/* LTSSM (link training) enable */
#define OB_XLAT_EN_VAL		        BIT(1)	/* outbound translation enable */
#define DBI_CS2				BIT(5)	/* DBI writes hit overlaid BAR mask regs */

/* CFG_SETUP selects the target of remote configuration accesses */
#define CFG_SETUP			0x008
#define CFG_BUS(x)		(((x) & 0xff) << 16)
#define CFG_DEVICE(x)		(((x) & 0x1f) << 8)
#define CFG_FUNC(x)		((x) & 0x7)
#define CFG_TYPE1			BIT(24)	/* type 1 access (bus beyond the immediate one) */

#define OB_SIZE				0x030
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)	(0x200 + (8 * (n)))	/* outbound window low address */
#define OB_OFFSET_HI(n)		(0x204 + (8 * (n)))	/* outbound window high address */
#define OB_ENABLEN			BIT(0)
#define OB_WIN_SIZE			8	/* 8MB */

/* IRQ register defines */
#define IRQ_EOI				0x050

#define MSI_IRQ				0x054	/* inbound MSI doorbell (also MSI target address) */
/* Eight MSI register banks, 0x10 bytes apart; (n) is the bank index */
#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET			4	/* MSI banks start at EOI vector 4 */

/* Per-INTx (A..D) legacy interrupt registers; (n) is the INTx index */
#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
#define INTx_EN				BIT(0)

#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_AER				BIT(5)	/* ECRC error */
#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR			BIT(3)	/* Correctable error */
#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
#define ERR_FATAL			BIT(1)	/* Fatal error */
#define ERR_SYS				BIT(0)	/* System error */
#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)

/* PCIE controller device IDs */
#define PCIE_RC_K2HK			0xb008
#define PCIE_RC_K2E			0xb009
#define PCIE_RC_K2L			0xb00a
#define PCIE_RC_K2G			0xb00b

/* drvdata of the controller device is the struct keystone_pcie */
#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)
0c4ffcfe | 84 | |
/*
 * struct keystone_pcie - private data for the Keystone PCIe controller
 *
 * Wraps the DesignWare core state (@pci) together with the Keystone
 * application register space and the legacy/MSI interrupt bookkeeping.
 */
struct keystone_pcie {
	struct dw_pcie		*pci;
	/* PCI Device ID */
	u32			device_id;
	/* Host-side IRQ line for each of INTA..INTD */
	int			legacy_host_irqs[PCI_NUM_INTX];
	struct device_node	*legacy_intc_np;

	int			msi_host_irq;	/* hwirq of the first MSI host interrupt */
	int			num_lanes;
	u32			num_viewport;	/* number of outbound translation windows */
	struct phy		**phy;		/* SerDes PHYs, one per lane */
	struct device_link	**link;		/* device links to the PHYs */
	struct device_node	*msi_intc_np;
	struct irq_domain	*legacy_irq_domain;
	struct device_node	*np;

	int			error_irq;	/* optional platform error IRQ; <= 0 if absent */

	/* Application register space */
	void __iomem		*va_app_base;	/* DT 1st resource */
	struct resource		app;
};
107 | ||
/* Read a 32-bit application (non-DBI) register at @offset */
static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}
112 | ||
/* Write @val to the 32-bit application (non-DBI) register at @offset */
static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
			       u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}
118 | ||
/*
 * Acknowledge an MSI vector: clear its status bit in the matching MSI
 * bank and EOI the corresponding host interrupt line.
 */
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	u32 reg_offset;
	u32 bit_pos;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/*
	 * MSI vectors are striped over 8 register banks: vector v lives in
	 * bank (v % 8), bit (v / 8).
	 */
	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
			   BIT(bit_pos));
	/* MSI bank EOI vectors start after the 4 legacy INTx vectors */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
138 | ||
/*
 * Compose the MSI message for @data: endpoints write the vector number
 * (hwirq) to the physical address of the MSI_IRQ application register.
 */
static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;
	u64 msi_target;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/* Use the physical address of MSI_IRQ as the MSI doorbell */
	msi_target = ks_pcie->app.start + MSI_IRQ;
	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);
	msg->data = data->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
157 | ||
/* MSI affinity is fixed by the hardware routing; changing it is not supported */
static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
163 | ||
/*
 * Mask an MSI vector by clearing its enable bit in the matching
 * MSI_IRQ_ENABLE_CLR bank.  pp->lock serializes against unmask.
 */
static void ks_pcie_msi_mask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/* Same bank/bit striping as in ks_pcie_msi_irq_ack() */
	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
187 | ||
/*
 * Unmask an MSI vector by setting its enable bit in the matching
 * MSI_IRQ_ENABLE_SET bank.  pp->lock serializes against mask.
 */
static void ks_pcie_msi_unmask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/* Same bank/bit striping as in ks_pcie_msi_irq_ack() */
	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
211 | ||
/* irq_chip backing the MSI irq domain created in ks_pcie_msi_host_init() */
static struct irq_chip ks_pcie_msi_irq_chip = {
	.name = "KEYSTONE-PCI-MSI",
	.irq_ack = ks_pcie_msi_irq_ack,
	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
	.irq_set_affinity = ks_pcie_msi_set_affinity,
	.irq_mask = ks_pcie_msi_mask,
	.irq_unmask = ks_pcie_msi_unmask,
};
220 | ||
/* Install the Keystone MSI chip and let the DWC core create the domains */
static int ks_pcie_msi_host_init(struct pcie_port *pp)
{
	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
	return dw_pcie_allocate_domains(pp);
}
226 | ||
/*
 * Demux one legacy INTx line: @offset selects the INTx index (0..3).
 * If its status bit is pending, dispatch the mapped virq, then EOI the
 * INTx vector at the controller.
 */
static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
				      int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));

	/* Only bit 0 (INTx_EN position) of the status register is meaningful */
	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
246 | ||
/* Enable all error interrupt sources (AER/AXI/correctable/fatal/...) */
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
251 | ||
a1cabd2b | 252 | static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) |
b492aca3 | 253 | { |
daaaa665 KVA |
254 | u32 reg; |
255 | struct device *dev = ks_pcie->pci->dev; | |
b492aca3 | 256 | |
daaaa665 KVA |
257 | reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS); |
258 | if (!reg) | |
b492aca3 KVA |
259 | return IRQ_NONE; |
260 | ||
daaaa665 KVA |
261 | if (reg & ERR_SYS) |
262 | dev_err(dev, "System Error\n"); | |
263 | ||
264 | if (reg & ERR_FATAL) | |
265 | dev_err(dev, "Fatal Error\n"); | |
266 | ||
267 | if (reg & ERR_NONFATAL) | |
268 | dev_dbg(dev, "Non Fatal Error\n"); | |
269 | ||
270 | if (reg & ERR_CORR) | |
271 | dev_dbg(dev, "Correctable Error\n"); | |
272 | ||
273 | if (reg & ERR_AXI) | |
274 | dev_err(dev, "AXI tag lookup fatal Error\n"); | |
275 | ||
276 | if (reg & ERR_AER) | |
277 | dev_err(dev, "ECRC Error\n"); | |
278 | ||
279 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); | |
b492aca3 | 280 | |
b492aca3 KVA |
281 | return IRQ_HANDLED; |
282 | } | |
283 | ||
/* No-op: the chained handler EOIs INTx at controller level (IRQ_EOI) */
static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}
287 | ||
/* No-op: INTx enables are set once at init via IRQ_ENABLE_SET */
static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}
291 | ||
/* No-op: see ks_pcie_mask_legacy_irq() */
static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}
295 | ||
/* irq_chip for the legacy INTx domain; all callbacks are no-ops */
static struct irq_chip ks_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_pcie_ack_legacy_irq,
	.irq_mask = ks_pcie_mask_legacy_irq,
	.irq_unmask = ks_pcie_unmask_legacy_irq,
};
302 | ||
/* .map hook: wire a legacy INTx hwirq to the Keystone chip as level IRQ */
static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
				       unsigned int irq,
				       irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}
313 | ||
/* Domain ops for the linear legacy INTx irq domain */
static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
	.map = ks_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};
318 | ||
/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @ks_pcie: Keystone PCIe controller private data
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 *
 * NOTE(review): the poll is unbounded; assumes the hardware always
 * reflects DBI_CS2 eventually.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	/* Spin until the other clock domain has latched DBI_CS2 */
	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2));
}
338 | ||
/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: Keystone PCIe controller private data
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	/* Spin until the other clock domain has dropped DBI_CS2 */
	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2);
}
357 | ||
/*
 * Program the RC application registers: disable the RC BARs for inbound
 * access, carve the outbound memory range into 8MB windows with a 1:1
 * CPU <-> PCI address mapping, then enable outbound translation.
 */
static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	u32 val;
	u32 num_viewport = ks_pcie->num_viewport;
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u64 start = pp->mem->start;
	u64 end = pp->mem->end;
	int i;

	/* Disable BARs for inbound access */
	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_pcie_clear_dbi_mode(ks_pcie);

	/* OB_SIZE takes log2 of the window size (8MB) */
	val = ilog2(OB_WIN_SIZE);
	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; i < num_viewport && (start < end); i++) {
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
				   lower_32_bits(start) | OB_ENABLEN);
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
				   upper_32_bits(start));
		start += OB_WIN_SIZE;
	}

	/* Turn on outbound address translation */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= OB_XLAT_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
390 | ||
/*
 * Config-space read for devices below the root bus.  CFG_SETUP selects
 * the target BDF (type 1 when the target is not directly on the root
 * bus), after which the access goes through the remote config window.
 */
static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 unsigned int devfn, int where, int size,
				 u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (bus->parent->number != pp->root_bus_nr)
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return dw_pcie_read(pp->va_cfg0_base + where, size, val);
}
407 | ||
/*
 * Config-space write for devices below the root bus; mirrors
 * ks_pcie_rd_other_conf(): program CFG_SETUP with the target BDF, then
 * write through the remote config window.
 */
static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 unsigned int devfn, int where, int size,
				 u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (bus->parent->number != pp->root_bus_nr)
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return dw_pcie_write(pp->va_cfg0_base + where, size, val);
}
424 | ||
/**
 * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	/* Configure and set up BAR0 */
	ks_pcie_set_dbi_mode(ks_pcie);

	/*
	 * Enable BAR0: with DBI_CS2 set, these writes hit the overlaid
	 * BAR mask register (enable bit + 4K aperture), not the BAR itself.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}
450 | ||
/**
 * ks_pcie_link_up() - Check if link up
 *
 * Reads the LTSSM state out of the port debug register; the link is
 * considered up only in the L0 state.
 */
static int ks_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
	val &= PORT_LOGIC_LTSSM_STATE_MASK;
	return (val == PORT_LOGIC_LTSSM_STATE_L0);
}
462 | ||
a1cabd2b | 463 | static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) |
b492aca3 KVA |
464 | { |
465 | u32 val; | |
466 | ||
467 | /* Disable Link training */ | |
a1cabd2b | 468 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); |
b492aca3 | 469 | val &= ~LTSSM_EN_VAL; |
a1cabd2b | 470 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); |
b492aca3 KVA |
471 | |
472 | /* Initiate Link Training */ | |
a1cabd2b KVA |
473 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); |
474 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | |
b492aca3 KVA |
475 | } |
476 | ||
/**
 * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize legacy irq domain
 * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
 * PCI host controller.
 */
static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/*
	 * We set these same and is used in pcie rd/wr_other_conf
	 * functions
	 */
	pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	/* Keep a copy of the resource; app.start is the MSI target address */
	ks_pcie->app = *res;

	return dw_pcie_host_init(pp);
}
515 | ||
/*
 * Fixup applied at device enable: if the host bridge is a Keystone RC,
 * clamp the device's Max Read Request Size to the controller's 256-byte
 * hardware limit.
 */
static void ks_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge;
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	/* The device itself may be the root-bus bridge */
	if (pci_is_root_bus(bus))
		bridge = dev;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (!bridge)
		return;

	/*
	 * Keystone PCI controller has a h/w limitation of
	 * 256 bytes maximum read request size. It can't handle
	 * anything higher than this. So force this limit on
	 * all downstream devices.
	 */
	if (pci_match_id(rc_pci_devids, bridge)) {
		if (pcie_get_readrq(dev) > 256) {
			dev_info(&dev->dev, "limiting MRRS to 256\n");
			pcie_set_readrq(dev, 256);
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
c15982df | 558 | |
/*
 * Bring the link up: kick link training and wait for the DWC core to
 * report link-up.  Returns 0 on success (or if already up), -ETIMEDOUT
 * if the link never comes up.
 */
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;

	if (dw_pcie_link_up(pci)) {
		dev_info(dev, "Link already up\n");
		return 0;
	}

	ks_pcie_initiate_link_train(ks_pcie);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_err(dev, "phy link never came up\n");
	return -ETIMEDOUT;
}
578 | ||
/*
 * Chained handler for one MSI host interrupt line.  The line's offset
 * from the first MSI host irq selects the MSI register bank; each bank
 * carries up to 4 pending vectors which are demuxed to the MSI domain.
 */
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = desc->irq_data.hwirq;
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irq;	/* bank index */
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 vector, virq, reg, pos;

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);

	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
	/*
	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
	 * shows 1, 9, 17, 25 and so forth
	 */
	for (pos = 0; pos < 4; pos++) {
		if (!(reg & BIT(pos)))
			continue;

		/* Reverse the bank/bit striping: vector = bank + bit*8 */
		vector = offset + (pos << 3);
		virq = irq_linear_revmap(pp->irq_domain, vector);
		dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
			virq);
		generic_handle_irq(virq);
	}

	chained_irq_exit(chip, desc);
}
617 | ||
/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	/* Host irqs are contiguous; the offset picks INTA..INTD */
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}
646 | ||
/*
 * Parse the "msi-interrupt-controller" DT child node, map each of its
 * host interrupts, remember the first hwirq (used to compute the bank
 * offset in the MSI handler) and install the chained MSI handler on
 * every line.  Silently succeeds when CONFIG_PCI_MSI is disabled.
 */
static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	struct irq_data *irq_data;
	int irq_count, irq, ret, i;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return 0;

	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
	if (!intc_np) {
		dev_warn(dev, "msi-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}

		/* Record the hwirq of the first MSI line exactly once */
		if (!ks_pcie->msi_host_irq) {
			irq_data = irq_get_irq_data(irq);
			if (!irq_data) {
				ret = -EINVAL;
				goto err;
			}
			ks_pcie->msi_host_irq = irq_data->hwirq;
		}

		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
						 ks_pcie);
	}

	of_node_put(intc_np);
	return 0;

err:
	of_node_put(intc_np);
	return ret;
}
698 | ||
/*
 * Parse the "legacy-interrupt-controller" DT child node, install the
 * chained INTx handler on each host line, create the INTx irq domain
 * and enable all four INTx sources in the controller.
 */
static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct irq_domain *legacy_irq_domain;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	int irq_count, irq, ret = 0, i;

	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
	if (!intc_np) {
		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}
		ks_pcie->legacy_host_irqs[i] = irq;

		irq_set_chained_handler_and_data(irq,
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}

	legacy_irq_domain =
		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
				      &ks_pcie_legacy_irq_domain_ops, NULL);
	if (!legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		ret = -EINVAL;
		goto err;
	}
	ks_pcie->legacy_irq_domain = legacy_irq_domain;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);

	/* NOTE: the success path also exits through here with ret == 0 */
err:
	of_node_put(intc_np);
	return ret;
}
750 | ||
/* Enable the error interrupt sources if a platform error IRQ was mapped */
static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
{
	if (ks_pcie->error_irq > 0)
		ks_pcie_enable_error_irq(ks_pcie);
}
756 | ||
/*
 * When a PCI device does not exist during config cycles, keystone host gets a
 * bus error instead of returning 0xffffffff. This handler always returns 0
 * for this kind of faults.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	/*
	 * If the aborted instruction matches the load-class encoding
	 * (mask 0x0e100090, value 0x00100090), fake an all-ones read
	 * result in the destination register and step past it.
	 * NOTE(review): assumes a 4-byte ARM-state instruction — confirm
	 * Thumb code cannot take this path.
	 */
	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;	/* Rd field, bits 15:12 */

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
776 | ||
/*
 * Read the vendor/device ID word from the "ti,syscon-pcie-id" syscon
 * and program it into the RC's config-space VID/DID registers.
 */
static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	ret = regmap_read(devctrl_regs, 0, &id);
	if (ret)
		return ret;

	/* Low half-word is the vendor ID, high half-word the device ID */
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);

	return 0;
}
799 | ||
/*
 * DWC .host_init hook: configure legacy and MSI interrupts, set up the
 * RC (core + application registers), start the link, program the IDs
 * and hook the ARM external-abort handler for absent-device config
 * accesses.
 */
static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	ret = ks_pcie_config_legacy_irq(ks_pcie);
	if (ret)
		return ret;

	ret = ks_pcie_config_msi_irq(ks_pcie);
	if (ret)
		return ret;

	dw_pcie_setup_rc(pp);

	ks_pcie_establish_link(ks_pcie);
	ks_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	/* Advertise 32-bit addressing in both I/O base and I/O limit */
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
			pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");

	return 0;
}
835 | ||
a1cabd2b KVA |
/* DesignWare host callbacks implemented by this driver. */
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.rd_other_conf = ks_pcie_rd_other_conf,
	.wr_other_conf = ks_pcie_wr_other_conf,
	.host_init = ks_pcie_host_init,
	.msi_host_init = ks_pcie_msi_host_init,
	.scan_bus = ks_pcie_v3_65_scan_bus,
};
843 | ||
a1cabd2b | 844 | static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) |
025dd3da MK |
845 | { |
846 | struct keystone_pcie *ks_pcie = priv; | |
847 | ||
a1cabd2b | 848 | return ks_pcie_handle_error_irq(ks_pcie); |
025dd3da MK |
849 | } |
850 | ||
a1cabd2b KVA |
851 | static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, |
852 | struct platform_device *pdev) | |
0c4ffcfe | 853 | { |
442ec4c0 KVA |
854 | struct dw_pcie *pci = ks_pcie->pci; |
855 | struct pcie_port *pp = &pci->pp; | |
856 | struct device *dev = &pdev->dev; | |
0c4ffcfe MK |
857 | int ret; |
858 | ||
025dd3da MK |
859 | /* |
860 | * Index 0 is the platform interrupt for error interrupt | |
861 | * from RC. This is optional. | |
862 | */ | |
863 | ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); | |
864 | if (ks_pcie->error_irq <= 0) | |
21fa0c51 | 865 | dev_info(dev, "no error IRQ defined\n"); |
025dd3da | 866 | else { |
a1cabd2b | 867 | ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler, |
8116acce WY |
868 | IRQF_SHARED, "pcie-error-irq", ks_pcie); |
869 | if (ret < 0) { | |
21fa0c51 | 870 | dev_err(dev, "failed to request error IRQ %d\n", |
025dd3da MK |
871 | ks_pcie->error_irq); |
872 | return ret; | |
873 | } | |
874 | } | |
875 | ||
a1cabd2b KVA |
876 | pp->ops = &ks_pcie_host_ops; |
877 | ret = ks_pcie_dw_host_init(ks_pcie); | |
0c4ffcfe | 878 | if (ret) { |
21fa0c51 | 879 | dev_err(dev, "failed to initialize host\n"); |
0c4ffcfe MK |
880 | return ret; |
881 | } | |
882 | ||
1e9f8dcf | 883 | return 0; |
0c4ffcfe MK |
884 | } |
885 | ||
/* Devices this driver binds to (builtin driver, so no MODULE_DEVICE_TABLE). */
static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};
0c4ffcfe | 893 | |
a1cabd2b KVA |
/* Core DesignWare callbacks: only link-state query is Keystone-specific. */
static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.link_up = ks_pcie_link_up,
};
897 | ||
49229238 | 898 | static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) |
0c4ffcfe | 899 | { |
49229238 | 900 | int num_lanes = ks_pcie->num_lanes; |
0c4ffcfe | 901 | |
49229238 KVA |
902 | while (num_lanes--) { |
903 | phy_power_off(ks_pcie->phy[num_lanes]); | |
904 | phy_exit(ks_pcie->phy[num_lanes]); | |
905 | } | |
906 | } | |
907 | ||
908 | static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie) | |
909 | { | |
910 | int i; | |
911 | int ret; | |
912 | int num_lanes = ks_pcie->num_lanes; | |
913 | ||
914 | for (i = 0; i < num_lanes; i++) { | |
915 | ret = phy_init(ks_pcie->phy[i]); | |
916 | if (ret < 0) | |
917 | goto err_phy; | |
918 | ||
919 | ret = phy_power_on(ks_pcie->phy[i]); | |
920 | if (ret < 0) { | |
921 | phy_exit(ks_pcie->phy[i]); | |
922 | goto err_phy; | |
923 | } | |
924 | } | |
0c4ffcfe MK |
925 | |
926 | return 0; | |
49229238 KVA |
927 | |
928 | err_phy: | |
929 | while (--i >= 0) { | |
930 | phy_power_off(ks_pcie->phy[i]); | |
931 | phy_exit(ks_pcie->phy[i]); | |
932 | } | |
933 | ||
934 | return ret; | |
0c4ffcfe MK |
935 | } |
936 | ||
937 | static int __init ks_pcie_probe(struct platform_device *pdev) | |
938 | { | |
939 | struct device *dev = &pdev->dev; | |
49229238 | 940 | struct device_node *np = dev->of_node; |
442ec4c0 | 941 | struct dw_pcie *pci; |
0c4ffcfe | 942 | struct keystone_pcie *ks_pcie; |
49229238 | 943 | struct device_link **link; |
b4f1af83 | 944 | u32 num_viewport; |
49229238 KVA |
945 | struct phy **phy; |
946 | u32 num_lanes; | |
947 | char name[10]; | |
1e9f8dcf | 948 | int ret; |
49229238 | 949 | int i; |
0c4ffcfe | 950 | |
21fa0c51 | 951 | ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); |
66700707 | 952 | if (!ks_pcie) |
0c4ffcfe | 953 | return -ENOMEM; |
66700707 | 954 | |
442ec4c0 KVA |
955 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); |
956 | if (!pci) | |
957 | return -ENOMEM; | |
958 | ||
959 | pci->dev = dev; | |
a1cabd2b | 960 | pci->ops = &ks_pcie_dw_pcie_ops; |
0c4ffcfe | 961 | |
b4f1af83 KVA |
962 | ret = of_property_read_u32(np, "num-viewport", &num_viewport); |
963 | if (ret < 0) { | |
964 | dev_err(dev, "unable to read *num-viewport* property\n"); | |
965 | return ret; | |
966 | } | |
967 | ||
49229238 KVA |
968 | ret = of_property_read_u32(np, "num-lanes", &num_lanes); |
969 | if (ret) | |
970 | num_lanes = 1; | |
c0464062 | 971 | |
49229238 KVA |
972 | phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL); |
973 | if (!phy) | |
974 | return -ENOMEM; | |
25de15c9 | 975 | |
49229238 KVA |
976 | link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL); |
977 | if (!link) | |
978 | return -ENOMEM; | |
979 | ||
980 | for (i = 0; i < num_lanes; i++) { | |
981 | snprintf(name, sizeof(name), "pcie-phy%d", i); | |
982 | phy[i] = devm_phy_optional_get(dev, name); | |
983 | if (IS_ERR(phy[i])) { | |
984 | ret = PTR_ERR(phy[i]); | |
985 | goto err_link; | |
986 | } | |
987 | ||
988 | if (!phy[i]) | |
989 | continue; | |
990 | ||
991 | link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); | |
992 | if (!link[i]) { | |
993 | ret = -EINVAL; | |
994 | goto err_link; | |
995 | } | |
996 | } | |
997 | ||
998 | ks_pcie->np = np; | |
999 | ks_pcie->pci = pci; | |
1000 | ks_pcie->link = link; | |
1001 | ks_pcie->num_lanes = num_lanes; | |
b4f1af83 | 1002 | ks_pcie->num_viewport = num_viewport; |
49229238 KVA |
1003 | ks_pcie->phy = phy; |
1004 | ||
1005 | ret = ks_pcie_enable_phy(ks_pcie); | |
1006 | if (ret) { | |
1007 | dev_err(dev, "failed to enable phy\n"); | |
1008 | goto err_link; | |
0c4ffcfe MK |
1009 | } |
1010 | ||
0c4ffcfe | 1011 | platform_set_drvdata(pdev, ks_pcie); |
8047eb55 KVA |
1012 | pm_runtime_enable(dev); |
1013 | ret = pm_runtime_get_sync(dev); | |
1014 | if (ret < 0) { | |
1015 | dev_err(dev, "pm_runtime_get_sync failed\n"); | |
1016 | goto err_get_sync; | |
0c4ffcfe | 1017 | } |
49229238 | 1018 | |
a1cabd2b | 1019 | ret = ks_pcie_add_pcie_port(ks_pcie, pdev); |
0c4ffcfe | 1020 | if (ret < 0) |
8047eb55 | 1021 | goto err_get_sync; |
0c4ffcfe MK |
1022 | |
1023 | return 0; | |
0c4ffcfe | 1024 | |
8047eb55 KVA |
1025 | err_get_sync: |
1026 | pm_runtime_put(dev); | |
1027 | pm_runtime_disable(dev); | |
49229238 KVA |
1028 | ks_pcie_disable_phy(ks_pcie); |
1029 | ||
1030 | err_link: | |
1031 | while (--i >= 0 && link[i]) | |
1032 | device_link_del(link[i]); | |
1033 | ||
0c4ffcfe MK |
1034 | return ret; |
1035 | } | |
1036 | ||
49229238 KVA |
1037 | static int __exit ks_pcie_remove(struct platform_device *pdev) |
1038 | { | |
1039 | struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); | |
1040 | struct device_link **link = ks_pcie->link; | |
1041 | int num_lanes = ks_pcie->num_lanes; | |
8047eb55 | 1042 | struct device *dev = &pdev->dev; |
49229238 | 1043 | |
8047eb55 KVA |
1044 | pm_runtime_put(dev); |
1045 | pm_runtime_disable(dev); | |
49229238 | 1046 | ks_pcie_disable_phy(ks_pcie); |
49229238 KVA |
1047 | while (num_lanes--) |
1048 | device_link_del(link[num_lanes]); | |
1049 | ||
1050 | return 0; | |
1051 | } | |
1052 | ||
0c4ffcfe MK |
/*
 * __refdata: probe is __init; this is safe because the driver is builtin
 * and registered via builtin_platform_driver() (no unbind/rebind races
 * with init memory are expected here).
 */
static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);