Commit | Line | Data |
---|---|---|
8961def5 ST |
1 | /* |
2 | * PCIe host controller driver for Xilinx AXI PCIe Bridge | |
3 | * | |
4 | * Copyright (c) 2012 - 2014 Xilinx, Inc. | |
5 | * | |
6 | * Based on the Tegra PCIe driver | |
7 | * | |
8 | * Bits taken from Synopsys Designware Host controller driver and | |
9 | * ARM PCI Host generic driver. | |
10 | * | |
11 | * This program is free software: you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License as published by | |
13 | * the Free Software Foundation, either version 2 of the License, or | |
14 | * (at your option) any later version. | |
15 | */ | |
16 | ||
17 | #include <linux/interrupt.h> | |
18 | #include <linux/irq.h> | |
19 | #include <linux/irqdomain.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/msi.h> | |
23 | #include <linux/of_address.h> | |
24 | #include <linux/of_pci.h> | |
25 | #include <linux/of_platform.h> | |
26 | #include <linux/of_irq.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/platform_device.h> | |
29 | ||
/* Register definitions (offsets into the bridge's register space) */
#define XILINX_PCIE_REG_BIR		0x00000130
#define XILINX_PCIE_REG_IDR		0x00000138
#define XILINX_PCIE_REG_IMR		0x0000013c
#define XILINX_PCIE_REG_PSCR		0x00000144
#define XILINX_PCIE_REG_RPSC		0x00000148
#define XILINX_PCIE_REG_MSIBASE1	0x0000014c
#define XILINX_PCIE_REG_MSIBASE2	0x00000150
#define XILINX_PCIE_REG_RPEFR		0x00000154
#define XILINX_PCIE_REG_RPIFR1		0x00000158
#define XILINX_PCIE_REG_RPIFR2		0x0000015c

/* Interrupt registers definitions (bits in IDR/IMR) */
#define XILINX_PCIE_INTR_LINK_DOWN	BIT(0)
#define XILINX_PCIE_INTR_ECRC_ERR	BIT(1)
#define XILINX_PCIE_INTR_STR_ERR	BIT(2)
#define XILINX_PCIE_INTR_HOT_RESET	BIT(3)
#define XILINX_PCIE_INTR_CFG_TIMEOUT	BIT(8)
#define XILINX_PCIE_INTR_CORRECTABLE	BIT(9)
#define XILINX_PCIE_INTR_NONFATAL	BIT(10)
#define XILINX_PCIE_INTR_FATAL		BIT(11)
#define XILINX_PCIE_INTR_INTX		BIT(16)
#define XILINX_PCIE_INTR_MSI		BIT(17)
#define XILINX_PCIE_INTR_SLV_UNSUPP	BIT(20)
#define XILINX_PCIE_INTR_SLV_UNEXP	BIT(21)
#define XILINX_PCIE_INTR_SLV_COMPL	BIT(22)
#define XILINX_PCIE_INTR_SLV_ERRP	BIT(23)
#define XILINX_PCIE_INTR_SLV_CMPABT	BIT(24)
#define XILINX_PCIE_INTR_SLV_ILLBUR	BIT(25)
#define XILINX_PCIE_INTR_MST_DECERR	BIT(26)
#define XILINX_PCIE_INTR_MST_SLVERR	BIT(27)
#define XILINX_PCIE_INTR_MST_ERRP	BIT(28)
/* Aggregate of the interrupt sources this driver enables/handles */
#define XILINX_PCIE_IMR_ALL_MASK	0x1FF30FED
/* Writing all-ones to IDR acknowledges every pending interrupt */
#define XILINX_PCIE_IDR_ALL_MASK	0xFFFFFFFF

/* Root Port Error FIFO Read Register definitions */
#define XILINX_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Interrupt FIFO Read Register 1 definitions */
#define XILINX_PCIE_RPIFR1_INTR_VALID	BIT(31)
#define XILINX_PCIE_RPIFR1_MSI_INTR	BIT(30)
#define XILINX_PCIE_RPIFR1_INTR_MASK	GENMASK(28, 27)
#define XILINX_PCIE_RPIFR1_ALL_MASK	0xFFFFFFFF
#define XILINX_PCIE_RPIFR1_INTR_SHIFT	27

/* Bridge Info Register definitions */
#define XILINX_PCIE_BIR_ECAM_SZ_MASK	GENMASK(18, 16)
#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT	16

/* Root Port Interrupt FIFO Read Register 2 definitions */
#define XILINX_PCIE_RPIFR2_MSG_DATA	GENMASK(15, 0)

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_REG_PSCR_LNKUP	BIT(11)

/* ECAM definitions: bus/devfn position inside an ECAM config address */
#define ECAM_BUS_NUM_SHIFT		20
#define ECAM_DEV_NUM_SHIFT		12

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS		128

/* Number of Memory Resources */
#define XILINX_MAX_NUM_RESOURCES	3
/**
 * struct xilinx_pcie_port - PCIe port information
 * @reg_base: IO Mapped Register Base
 * @irq: Interrupt number
 * @msi_pages: Virtual address of the page whose physical address is
 *             programmed into MSIBASE1/2 as the MSI target
 * @root_busno: Root Bus number
 * @dev: Device pointer
 * @irq_domain: IRQ domain pointer (INTx, or MSI when CONFIG_PCI_MSI)
 * @bus_range: Bus range
 * @resources: Bus Resources
 */
struct xilinx_pcie_port {
	void __iomem *reg_base;
	u32 irq;
	unsigned long msi_pages;
	u8 root_busno;
	struct device *dev;
	struct irq_domain *irq_domain;
	struct resource bus_range;
	struct list_head resources;
};
121 | ||
/* Allocation bitmap for the XILINX_NUM_MSI_IRQS MSI vectors (file-global) */
static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
123 | ||
124 | static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys) | |
125 | { | |
126 | return sys->private_data; | |
127 | } | |
128 | ||
129 | static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg) | |
130 | { | |
131 | return readl(port->reg_base + reg); | |
132 | } | |
133 | ||
134 | static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg) | |
135 | { | |
136 | writel(val, port->reg_base + reg); | |
137 | } | |
138 | ||
139 | static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port) | |
140 | { | |
141 | return (pcie_read(port, XILINX_PCIE_REG_PSCR) & | |
142 | XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0; | |
143 | } | |
144 | ||
145 | /** | |
146 | * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts | |
147 | * @port: PCIe port information | |
148 | */ | |
149 | static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) | |
150 | { | |
151 | u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR); | |
152 | ||
153 | if (val & XILINX_PCIE_RPEFR_ERR_VALID) { | |
154 | dev_dbg(port->dev, "Requester ID %d\n", | |
155 | val & XILINX_PCIE_RPEFR_REQ_ID); | |
156 | pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, | |
157 | XILINX_PCIE_REG_RPEFR); | |
158 | } | |
159 | } | |
160 | ||
161 | /** | |
162 | * xilinx_pcie_valid_device - Check if a valid device is present on bus | |
163 | * @bus: PCI Bus structure | |
164 | * @devfn: device/function | |
165 | * | |
166 | * Return: 'true' on success and 'false' if invalid device is found | |
167 | */ | |
168 | static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) | |
169 | { | |
170 | struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); | |
171 | ||
172 | /* Check if link is up when trying to access downstream ports */ | |
173 | if (bus->number != port->root_busno) | |
174 | if (!xilinx_pcie_link_is_up(port)) | |
175 | return false; | |
176 | ||
177 | /* Only one device down on each root port */ | |
178 | if (bus->number == port->root_busno && devfn > 0) | |
179 | return false; | |
180 | ||
181 | /* | |
182 | * Do not read more than one device on the bus directly attached | |
183 | * to RC. | |
184 | */ | |
185 | if (bus->primary == port->root_busno && devfn > 0) | |
186 | return false; | |
187 | ||
188 | return true; | |
189 | } | |
190 | ||
191 | /** | |
192 | * xilinx_pcie_config_base - Get configuration base | |
193 | * @bus: PCI Bus structure | |
194 | * @devfn: Device/function | |
195 | * @where: Offset from base | |
196 | * | |
197 | * Return: Base address of the configuration space needed to be | |
198 | * accessed. | |
199 | */ | |
200 | static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus, | |
201 | unsigned int devfn, int where) | |
202 | { | |
203 | struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); | |
204 | int relbus; | |
205 | ||
206 | relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | | |
207 | (devfn << ECAM_DEV_NUM_SHIFT); | |
208 | ||
209 | return port->reg_base + relbus + where; | |
210 | } | |
211 | ||
212 | /** | |
213 | * xilinx_pcie_read_config - Read configuration space | |
214 | * @bus: PCI Bus structure | |
215 | * @devfn: Device/function | |
216 | * @where: Offset from base | |
217 | * @size: Byte/word/dword | |
218 | * @val: Value to be read | |
219 | * | |
220 | * Return: PCIBIOS_SUCCESSFUL on success | |
221 | * PCIBIOS_DEVICE_NOT_FOUND on failure | |
222 | */ | |
223 | static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, | |
224 | int where, int size, u32 *val) | |
225 | { | |
226 | void __iomem *addr; | |
227 | ||
228 | if (!xilinx_pcie_valid_device(bus, devfn)) { | |
229 | *val = 0xFFFFFFFF; | |
230 | return PCIBIOS_DEVICE_NOT_FOUND; | |
231 | } | |
232 | ||
233 | addr = xilinx_pcie_config_base(bus, devfn, where); | |
234 | ||
235 | switch (size) { | |
236 | case 1: | |
237 | *val = readb(addr); | |
238 | break; | |
239 | case 2: | |
240 | *val = readw(addr); | |
241 | break; | |
242 | default: | |
243 | *val = readl(addr); | |
244 | break; | |
245 | } | |
246 | ||
247 | return PCIBIOS_SUCCESSFUL; | |
248 | } | |
249 | ||
250 | /** | |
251 | * xilinx_pcie_write_config - Write configuration space | |
252 | * @bus: PCI Bus structure | |
253 | * @devfn: Device/function | |
254 | * @where: Offset from base | |
255 | * @size: Byte/word/dword | |
256 | * @val: Value to be written to device | |
257 | * | |
258 | * Return: PCIBIOS_SUCCESSFUL on success | |
259 | * PCIBIOS_DEVICE_NOT_FOUND on failure | |
260 | */ | |
261 | static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, | |
262 | int where, int size, u32 val) | |
263 | { | |
264 | void __iomem *addr; | |
265 | ||
266 | if (!xilinx_pcie_valid_device(bus, devfn)) | |
267 | return PCIBIOS_DEVICE_NOT_FOUND; | |
268 | ||
269 | addr = xilinx_pcie_config_base(bus, devfn, where); | |
270 | ||
271 | switch (size) { | |
272 | case 1: | |
273 | writeb(val, addr); | |
274 | break; | |
275 | case 2: | |
276 | writew(val, addr); | |
277 | break; | |
278 | default: | |
279 | writel(val, addr); | |
280 | break; | |
281 | } | |
282 | ||
283 | return PCIBIOS_SUCCESSFUL; | |
284 | } | |
285 | ||
/* PCIe operations: config-space accessors handed to the PCI core */
static struct pci_ops xilinx_pcie_ops = {
	.read  = xilinx_pcie_read_config,
	.write = xilinx_pcie_write_config,
};
291 | ||
292 | /* MSI functions */ | |
293 | ||
294 | /** | |
295 | * xilinx_pcie_destroy_msi - Free MSI number | |
296 | * @irq: IRQ to be freed | |
297 | */ | |
298 | static void xilinx_pcie_destroy_msi(unsigned int irq) | |
299 | { | |
300 | struct irq_desc *desc; | |
301 | struct msi_desc *msi; | |
302 | struct xilinx_pcie_port *port; | |
303 | ||
304 | desc = irq_to_desc(irq); | |
305 | msi = irq_desc_get_msi_desc(desc); | |
306 | port = sys_to_pcie(msi->dev->bus->sysdata); | |
307 | ||
308 | if (!test_bit(irq, msi_irq_in_use)) | |
309 | dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); | |
310 | else | |
311 | clear_bit(irq, msi_irq_in_use); | |
312 | } | |
313 | ||
314 | /** | |
315 | * xilinx_pcie_assign_msi - Allocate MSI number | |
316 | * @port: PCIe port structure | |
317 | * | |
318 | * Return: A valid IRQ on success and error value on failure. | |
319 | */ | |
320 | static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port) | |
321 | { | |
322 | int pos; | |
323 | ||
324 | pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS); | |
325 | if (pos < XILINX_NUM_MSI_IRQS) | |
326 | set_bit(pos, msi_irq_in_use); | |
327 | else | |
328 | return -ENOSPC; | |
329 | ||
330 | return pos; | |
331 | } | |
332 | ||
/**
 * xilinx_msi_teardown_irq - Destroy the MSI
 * @chip: MSI Chip descriptor
 * @irq: MSI IRQ to destroy
 */
static void xilinx_msi_teardown_irq(struct msi_controller *chip,
				    unsigned int irq)
{
	/* Release the allocation-bitmap bit backing this vector */
	xilinx_pcie_destroy_msi(irq);
}
343 | ||
344 | /** | |
345 | * xilinx_pcie_msi_setup_irq - Setup MSI request | |
346 | * @chip: MSI chip pointer | |
347 | * @pdev: PCIe device pointer | |
348 | * @desc: MSI descriptor pointer | |
349 | * | |
350 | * Return: '0' on success and error value on failure | |
351 | */ | |
c2791b80 | 352 | static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, |
8961def5 ST |
353 | struct pci_dev *pdev, |
354 | struct msi_desc *desc) | |
355 | { | |
356 | struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata); | |
357 | unsigned int irq; | |
358 | int hwirq; | |
359 | struct msi_msg msg; | |
360 | phys_addr_t msg_addr; | |
361 | ||
362 | hwirq = xilinx_pcie_assign_msi(port); | |
f9dd0ce6 DC |
363 | if (hwirq < 0) |
364 | return hwirq; | |
8961def5 ST |
365 | |
366 | irq = irq_create_mapping(port->irq_domain, hwirq); | |
367 | if (!irq) | |
368 | return -EINVAL; | |
369 | ||
370 | irq_set_msi_desc(irq, desc); | |
371 | ||
372 | msg_addr = virt_to_phys((void *)port->msi_pages); | |
373 | ||
374 | msg.address_hi = 0; | |
375 | msg.address_lo = msg_addr; | |
376 | msg.data = irq; | |
377 | ||
83a18912 | 378 | pci_write_msi_msg(irq, &msg); |
8961def5 ST |
379 | |
380 | return 0; | |
381 | } | |
382 | ||
/* MSI Chip Descriptor: per-controller MSI setup/teardown callbacks */
static struct msi_controller xilinx_pcie_msi_chip = {
	.setup_irq = xilinx_pcie_msi_setup_irq,
	.teardown_irq = xilinx_msi_teardown_irq,
};
388 | ||
/* HW Interrupt Chip Descriptor: mask/unmask at the PCI MSI capability */
static struct irq_chip xilinx_msi_irq_chip = {
	.name = "Xilinx PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
397 | ||
/**
 * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	/* ARM-specific: mark the descriptor as requestable */
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
415 | ||
/* IRQ Domain operations for the MSI domain */
static const struct irq_domain_ops msi_domain_ops = {
	.map = xilinx_pcie_msi_map,
};
420 | ||
421 | /** | |
422 | * xilinx_pcie_enable_msi - Enable MSI support | |
423 | * @port: PCIe port information | |
424 | */ | |
425 | static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) | |
426 | { | |
427 | phys_addr_t msg_addr; | |
428 | ||
429 | port->msi_pages = __get_free_pages(GFP_KERNEL, 0); | |
430 | msg_addr = virt_to_phys((void *)port->msi_pages); | |
431 | pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); | |
432 | pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); | |
433 | } | |
434 | ||
8961def5 ST |
435 | /* INTx Functions */ |
436 | ||
/**
 * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	/* No per-line hardware mask/unmask is used: dummy chip suffices */
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	/* ARM-specific: mark the descriptor as requestable */
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
454 | ||
/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pcie_intx_map,
};
459 | ||
460 | /* PCIe HW Functions */ | |
461 | ||
/**
 * xilinx_pcie_intr_handler - Interrupt Service Handler
 * @irq: IRQ number
 * @data: PCIe port information
 *
 * Decodes the bridge interrupt status (IDR masked by IMR), logs error
 * conditions, demultiplexes INTx/MSI events from the RP interrupt FIFO,
 * and finally acknowledges the handled bits by writing them back to IDR.
 *
 * Return: IRQ_HANDLED on success and IRQ_NONE on failure
 */
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
	u32 val, mask, status, msi_data;

	/* Read interrupt decode and mask registers */
	val = pcie_read(port, XILINX_PCIE_REG_IDR);
	mask = pcie_read(port, XILINX_PCIE_REG_IMR);

	/* Only bits both pending and enabled belong to us */
	status = val & mask;
	if (!status)
		return IRQ_NONE;

	if (status & XILINX_PCIE_INTR_LINK_DOWN)
		dev_warn(port->dev, "Link Down\n");

	if (status & XILINX_PCIE_INTR_ECRC_ERR)
		dev_warn(port->dev, "ECRC failed\n");

	if (status & XILINX_PCIE_INTR_STR_ERR)
		dev_warn(port->dev, "Streaming error\n");

	if (status & XILINX_PCIE_INTR_HOT_RESET)
		dev_info(port->dev, "Hot reset\n");

	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
		dev_warn(port->dev, "ECAM access timeout\n");

	/* AER-style error messages also need the error FIFO drained */
	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
		dev_warn(port->dev, "Correctable error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_NONFATAL) {
		dev_warn(port->dev, "Non fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_FATAL) {
		dev_warn(port->dev, "Fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_INTX) {
		/* INTx interrupt received */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		/* Check whether interrupt valid */
		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		/* Clear interrupt FIFO register 1 */
		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
			   XILINX_PCIE_REG_RPIFR1);

		/*
		 * Handle INTx Interrupt: FIFO encodes INTA..INTD as 0..3,
		 * domain hwirqs are 1-based, hence the +1.
		 */
		val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
		       XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
		generic_handle_irq(irq_find_mapping(port->irq_domain, val));
	}

	if (status & XILINX_PCIE_INTR_MSI) {
		/* MSI Interrupt */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
			/* The MSI payload is the virq (see msi_setup_irq) */
			msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
				   XILINX_PCIE_RPIFR2_MSG_DATA;

			/* Clear interrupt FIFO register 1 */
			pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
				   XILINX_PCIE_REG_RPIFR1);

			if (IS_ENABLED(CONFIG_PCI_MSI)) {
				/* Handle MSI Interrupt */
				generic_handle_irq(msi_data);
			}
		}
	}

	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
		dev_warn(port->dev, "Slave unsupported request\n");

	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
		dev_warn(port->dev, "Slave unexpected completion\n");

	if (status & XILINX_PCIE_INTR_SLV_COMPL)
		dev_warn(port->dev, "Slave completion timeout\n");

	if (status & XILINX_PCIE_INTR_SLV_ERRP)
		dev_warn(port->dev, "Slave Error Poison\n");

	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
		dev_warn(port->dev, "Slave Completer Abort\n");

	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
		dev_warn(port->dev, "Slave Illegal Burst\n");

	if (status & XILINX_PCIE_INTR_MST_DECERR)
		dev_warn(port->dev, "Master decode error\n");

	if (status & XILINX_PCIE_INTR_MST_SLVERR)
		dev_warn(port->dev, "Master slave error\n");

	if (status & XILINX_PCIE_INTR_MST_ERRP)
		dev_warn(port->dev, "Master error poison\n");

	/* Clear the Interrupt Decode register */
	pcie_write(port, status, XILINX_PCIE_REG_IDR);

	return IRQ_HANDLED;
}
588 | ||
589 | /** | |
590 | * xilinx_pcie_free_irq_domain - Free IRQ domain | |
591 | * @port: PCIe port information | |
592 | */ | |
593 | static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port) | |
594 | { | |
595 | int i; | |
596 | u32 irq, num_irqs; | |
597 | ||
598 | /* Free IRQ Domain */ | |
599 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
600 | ||
601 | free_pages(port->msi_pages, 0); | |
602 | ||
603 | num_irqs = XILINX_NUM_MSI_IRQS; | |
604 | } else { | |
605 | /* INTx */ | |
606 | num_irqs = 4; | |
607 | } | |
608 | ||
609 | for (i = 0; i < num_irqs; i++) { | |
610 | irq = irq_find_mapping(port->irq_domain, i); | |
611 | if (irq > 0) | |
612 | irq_dispose_mapping(irq); | |
613 | } | |
614 | ||
615 | irq_domain_remove(port->irq_domain); | |
616 | } | |
617 | ||
618 | /** | |
619 | * xilinx_pcie_init_irq_domain - Initialize IRQ domain | |
620 | * @port: PCIe port information | |
621 | * | |
622 | * Return: '0' on success and error value on failure | |
623 | */ | |
624 | static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) | |
625 | { | |
626 | struct device *dev = port->dev; | |
627 | struct device_node *node = dev->of_node; | |
628 | struct device_node *pcie_intc_node; | |
629 | ||
630 | /* Setup INTx */ | |
631 | pcie_intc_node = of_get_next_child(node, NULL); | |
632 | if (!pcie_intc_node) { | |
633 | dev_err(dev, "No PCIe Intc node found\n"); | |
634 | return PTR_ERR(pcie_intc_node); | |
635 | } | |
636 | ||
637 | port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, | |
638 | &intx_domain_ops, | |
639 | port); | |
640 | if (!port->irq_domain) { | |
641 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | |
642 | return PTR_ERR(port->irq_domain); | |
643 | } | |
644 | ||
645 | /* Setup MSI */ | |
646 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
647 | port->irq_domain = irq_domain_add_linear(node, | |
648 | XILINX_NUM_MSI_IRQS, | |
649 | &msi_domain_ops, | |
650 | &xilinx_pcie_msi_chip); | |
651 | if (!port->irq_domain) { | |
652 | dev_err(dev, "Failed to get a MSI IRQ domain\n"); | |
653 | return PTR_ERR(port->irq_domain); | |
654 | } | |
655 | ||
656 | xilinx_pcie_enable_msi(port); | |
657 | } | |
658 | ||
659 | return 0; | |
660 | } | |
661 | ||
662 | /** | |
663 | * xilinx_pcie_init_port - Initialize hardware | |
664 | * @port: PCIe port information | |
665 | */ | |
666 | static void xilinx_pcie_init_port(struct xilinx_pcie_port *port) | |
667 | { | |
668 | if (xilinx_pcie_link_is_up(port)) | |
669 | dev_info(port->dev, "PCIe Link is UP\n"); | |
670 | else | |
671 | dev_info(port->dev, "PCIe Link is DOWN\n"); | |
672 | ||
673 | /* Disable all interrupts */ | |
674 | pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK, | |
675 | XILINX_PCIE_REG_IMR); | |
676 | ||
677 | /* Clear pending interrupts */ | |
678 | pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) & | |
679 | XILINX_PCIE_IMR_ALL_MASK, | |
680 | XILINX_PCIE_REG_IDR); | |
681 | ||
682 | /* Enable all interrupts */ | |
683 | pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR); | |
684 | ||
685 | /* Enable the Bridge enable bit */ | |
686 | pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) | | |
687 | XILINX_PCIE_REG_RPSC_BEN, | |
688 | XILINX_PCIE_REG_RPSC); | |
689 | } | |
690 | ||
/**
 * xilinx_pcie_setup - Setup memory resources
 * @nr: Bus number
 * @sys: Per controller structure
 *
 * Return: '1' on success and error value on failure
 */
static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct xilinx_pcie_port *port = sys_to_pcie(sys);

	/* Hand the windows parsed from DT over to the ARM PCI core */
	list_splice_init(&port->resources, &sys->resources);

	return 1;
}
706 | ||
707 | /** | |
708 | * xilinx_pcie_scan_bus - Scan PCIe bus for devices | |
709 | * @nr: Bus number | |
710 | * @sys: Per controller structure | |
711 | * | |
712 | * Return: Valid Bus pointer on success and NULL on failure | |
713 | */ | |
714 | static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys) | |
715 | { | |
716 | struct xilinx_pcie_port *port = sys_to_pcie(sys); | |
717 | struct pci_bus *bus; | |
718 | ||
719 | port->root_busno = sys->busnr; | |
720 | bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops, | |
721 | sys, &sys->resources); | |
722 | ||
723 | return bus; | |
724 | } | |
725 | ||
/**
 * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
 * @port: PCIe port information
 *
 * Walks the DT "ranges" property, claims each memory window from
 * iomem_resource, and appends it to @port->resources together with the
 * CPU-to-PCI address offset.  Also derives the bus range (from DT or,
 * failing that, from the ECAM size advertised in the BIR register) and
 * appends it as a bus resource.
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct resource *mem;
	resource_size_t offset;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct pci_host_bridge_window *win;
	int err = 0, mem_resno = 0;

	/* Get the ranges */
	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	/* Parse the ranges and add the resources found to the list */
	for_each_of_pci_range(&parser, &range) {

		if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
			dev_err(dev, "Maximum memory resources exceeded\n");
			return -EINVAL;
		}

		/* devm-allocated: freed explicitly on the error path below */
		mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
		if (!mem) {
			err = -ENOMEM;
			goto free_resources;
		}

		of_pci_range_to_resource(&range, node, mem);

		switch (mem->flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_MEM:
			/* offset translates CPU addresses to PCI addresses */
			offset = range.cpu_addr - range.pci_addr;
			mem_resno++;
			break;
		default:
			err = -EINVAL;
			break;
		}

		/* Non-MEM ranges are skipped with a warning, not fatal */
		if (err < 0) {
			dev_warn(dev, "Invalid resource found %pR\n", mem);
			continue;
		}

		err = request_resource(&iomem_resource, mem);
		if (err)
			goto free_resources;

		pci_add_resource_offset(&port->resources, mem, offset);
	}

	/* Get the bus range */
	if (of_pci_parse_bus_range(node, &port->bus_range)) {
		/* No DT bus-range: size it from the bridge's ECAM info */
		u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
		u8 last;

		last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
			XILINX_PCIE_BIR_ECAM_SZ_SHIFT;

		port->bus_range = (struct resource) {
			.name	= node->name,
			.start	= 0,
			.end	= last,
			.flags	= IORESOURCE_BUS,
		};
	}

	/* Register bus resource */
	pci_add_resource(&port->resources, &port->bus_range);

	return 0;

free_resources:
	/* Undo request_resource() claims and free the devm allocations */
	release_child_resources(&iomem_resource);
	list_for_each_entry(win, &port->resources, list)
		devm_kfree(dev, win->res);
	pci_free_resource_list(&port->resources);

	return err;
}
816 | ||
817 | /** | |
818 | * xilinx_pcie_parse_dt - Parse Device tree | |
819 | * @port: PCIe port information | |
820 | * | |
821 | * Return: '0' on success and error value on failure | |
822 | */ | |
823 | static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) | |
824 | { | |
825 | struct device *dev = port->dev; | |
826 | struct device_node *node = dev->of_node; | |
827 | struct resource regs; | |
828 | const char *type; | |
829 | int err; | |
830 | ||
831 | type = of_get_property(node, "device_type", NULL); | |
832 | if (!type || strcmp(type, "pci")) { | |
833 | dev_err(dev, "invalid \"device_type\" %s\n", type); | |
834 | return -EINVAL; | |
835 | } | |
836 | ||
837 | err = of_address_to_resource(node, 0, ®s); | |
838 | if (err) { | |
839 | dev_err(dev, "missing \"reg\" property\n"); | |
840 | return err; | |
841 | } | |
842 | ||
843 | port->reg_base = devm_ioremap_resource(dev, ®s); | |
844 | if (IS_ERR(port->reg_base)) | |
845 | return PTR_ERR(port->reg_base); | |
846 | ||
847 | port->irq = irq_of_parse_and_map(node, 0); | |
848 | err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler, | |
849 | IRQF_SHARED, "xilinx-pcie", port); | |
850 | if (err) { | |
851 | dev_err(dev, "unable to request irq %d\n", port->irq); | |
852 | return err; | |
853 | } | |
854 | ||
855 | return 0; | |
856 | } | |
857 | ||
/**
 * xilinx_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Allocates the per-port state, parses DT (registers + IRQ), initializes
 * the hardware and IRQ domains, gathers the DT resource windows, then
 * registers the controller with the ARM PCI core via hw_pci.
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_probe(struct platform_device *pdev)
{
	struct xilinx_pcie_port *port;
	struct hw_pci hw;
	struct device *dev = &pdev->dev;
	int err;

	if (!dev->of_node)
		return -ENODEV;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = dev;

	err = xilinx_pcie_parse_dt(port);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	xilinx_pcie_init_port(port);

	err = xilinx_pcie_init_irq_domain(port);
	if (err) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		return err;
	}

	/*
	 * Parse PCI ranges, configuration bus range and
	 * request their resources
	 */
	INIT_LIST_HEAD(&port->resources);
	err = xilinx_pcie_parse_and_add_res(port);
	if (err) {
		dev_err(dev, "Failed adding resources\n");
		return err;
	}

	platform_set_drvdata(pdev, port);

	/* Register the device */
	memset(&hw, 0, sizeof(hw));
	hw = (struct hw_pci) {
		/* One root port per device node */
		.nr_controllers	= 1,
		.private_data	= (void **)&port,
		.setup		= xilinx_pcie_setup,
		.map_irq	= of_irq_parse_and_map_pci,
		.scan		= xilinx_pcie_scan_bus,
		.ops		= &xilinx_pcie_ops,
	};

#ifdef CONFIG_PCI_MSI
	/* Hook the MSI controller in before the common init scans the bus */
	xilinx_pcie_msi_chip.dev = port->dev;
	hw.msi_ctrl = &xilinx_pcie_msi_chip;
#endif
	pci_common_init_dev(dev, &hw);

	return 0;
}
926 | ||
/**
 * xilinx_pcie_remove - Remove function
 * @pdev: Platform device pointer
 *
 * Return: '0' always
 */
static int xilinx_pcie_remove(struct platform_device *pdev)
{
	struct xilinx_pcie_port *port = platform_get_drvdata(pdev);

	/* Everything else is devm-managed; only the IRQ domain needs care */
	xilinx_pcie_free_irq_domain(port);

	return 0;
}
941 | ||
942 | static struct of_device_id xilinx_pcie_of_match[] = { | |
943 | { .compatible = "xlnx,axi-pcie-host-1.00.a", }, | |
944 | {} | |
945 | }; | |
946 | ||
/* Platform driver glue: bound by the compatible string above */
static struct platform_driver xilinx_pcie_driver = {
	.driver = {
		.name = "xilinx-pcie",
		.owner = THIS_MODULE,
		.of_match_table = xilinx_pcie_of_match,
		/* Unbinding via sysfs is not supported for this bridge */
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pcie_probe,
	.remove = xilinx_pcie_remove,
};
module_platform_driver(xilinx_pcie_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
MODULE_LICENSE("GPL v2");