Commit | Line | Data |
---|---|---|
8cfab3cf | 1 | // SPDX-License-Identifier: GPL-2.0 |
feb85d9b | 2 | /* |
96291d56 | 3 | * Synopsys DesignWare PCIe host controller driver |
feb85d9b KVA |
4 | * |
5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | |
6 | * http://www.samsung.com | |
7 | * | |
8 | * Author: Jingoo Han <jg1.han@samsung.com> | |
feb85d9b KVA |
9 | */ |
10 | ||
11 | #include <linux/irqdomain.h> | |
12 | #include <linux/of_address.h> | |
13 | #include <linux/of_pci.h> | |
14 | #include <linux/pci_regs.h> | |
15 | #include <linux/platform_device.h> | |
16 | ||
17 | #include "pcie-designware.h" | |
18 | ||
19 | static struct pci_ops dw_pcie_ops; | |
20 | ||
21 | static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | |
22 | u32 *val) | |
23 | { | |
24 | struct dw_pcie *pci; | |
25 | ||
26 | if (pp->ops->rd_own_conf) | |
27 | return pp->ops->rd_own_conf(pp, where, size, val); | |
28 | ||
29 | pci = to_dw_pcie_from_pp(pp); | |
30 | return dw_pcie_read(pci->dbi_base + where, size, val); | |
31 | } | |
32 | ||
33 | static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | |
34 | u32 val) | |
35 | { | |
36 | struct dw_pcie *pci; | |
37 | ||
38 | if (pp->ops->wr_own_conf) | |
39 | return pp->ops->wr_own_conf(pp, where, size, val); | |
40 | ||
41 | pci = to_dw_pcie_from_pp(pp); | |
42 | return dw_pcie_write(pci->dbi_base + where, size, val); | |
43 | } | |
44 | ||
/*
 * irq_chip attached to every DW MSI vector (see dw_pcie_msi_map()).
 * All four operations map straight onto the generic PCI MSI mask helpers.
 */
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
52 | ||
/*
 * MSI int handler: called from the chained RC interrupt to demultiplex
 * pending MSI vectors into their per-vector Linux IRQs.
 *
 * Returns IRQ_HANDLED if at least one pending vector was dispatched,
 * IRQ_NONE otherwise.
 */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	u32 val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	/*
	 * Each controller handles 32 vectors; its register bank sits at a
	 * 12-byte stride from the previous one (hence "i * 12", matching
	 * the ENABLE-register arithmetic in dw_pcie_msi_set_irq()).
	 */
	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    &val);
		if (!val)
			continue; /* no pending vectors in this bank */

		ret = IRQ_HANDLED;
		pos = 0;
		/* Dispatch every set bit, lowest first */
		while ((pos = find_next_bit((unsigned long *) &val, 32,
					    pos)) != 32) {
			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
			generic_handle_irq(irq);
			/*
			 * Ack the serviced vector only after its handler ran
			 * (presumably write-1-to-clear semantics — the DWC
			 * databook is authoritative here).
			 */
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
					    4, 1 << pos);
			pos++;
		}
	}

	return ret;
}
80 | ||
81 | void dw_pcie_msi_init(struct pcie_port *pp) | |
82 | { | |
111111a7 NC |
83 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
84 | struct device *dev = pci->dev; | |
85 | struct page *page; | |
feb85d9b KVA |
86 | u64 msi_target; |
87 | ||
111111a7 NC |
88 | page = alloc_page(GFP_KERNEL); |
89 | pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); | |
90 | if (dma_mapping_error(dev, pp->msi_data)) { | |
91 | dev_err(dev, "failed to map MSI data\n"); | |
92 | __free_page(page); | |
93 | return; | |
94 | } | |
95 | msi_target = (u64)pp->msi_data; | |
feb85d9b KVA |
96 | |
97 | /* program the msi_data */ | |
98 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, | |
99 | (u32)(msi_target & 0xffffffff)); | |
100 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, | |
101 | (u32)(msi_target >> 32 & 0xffffffff)); | |
102 | } | |
103 | ||
104 | static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) | |
105 | { | |
106 | unsigned int res, bit, val; | |
107 | ||
108 | res = (irq / 32) * 12; | |
109 | bit = irq % 32; | |
110 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); | |
111 | val &= ~(1 << bit); | |
112 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); | |
113 | } | |
114 | ||
115 | static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base, | |
116 | unsigned int nvec, unsigned int pos) | |
117 | { | |
118 | unsigned int i; | |
119 | ||
120 | for (i = 0; i < nvec; i++) { | |
121 | irq_set_msi_desc_off(irq_base, i, NULL); | |
122 | /* Disable corresponding interrupt on MSI controller */ | |
123 | if (pp->ops->msi_clear_irq) | |
124 | pp->ops->msi_clear_irq(pp, pos + i); | |
125 | else | |
126 | dw_pcie_msi_clear_irq(pp, pos + i); | |
127 | } | |
128 | ||
129 | bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec)); | |
130 | } | |
131 | ||
132 | static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) | |
133 | { | |
134 | unsigned int res, bit, val; | |
135 | ||
136 | res = (irq / 32) * 12; | |
137 | bit = irq % 32; | |
138 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); | |
139 | val |= 1 << bit; | |
140 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); | |
141 | } | |
142 | ||
143 | static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) | |
144 | { | |
145 | int irq, pos0, i; | |
146 | struct pcie_port *pp; | |
147 | ||
148 | pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc); | |
149 | pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS, | |
150 | order_base_2(no_irqs)); | |
151 | if (pos0 < 0) | |
152 | goto no_valid_irq; | |
153 | ||
154 | irq = irq_find_mapping(pp->irq_domain, pos0); | |
155 | if (!irq) | |
156 | goto no_valid_irq; | |
157 | ||
158 | /* | |
159 | * irq_create_mapping (called from dw_pcie_host_init) pre-allocates | |
160 | * descs so there is no need to allocate descs here. We can therefore | |
161 | * assume that if irq_find_mapping above returns non-zero, then the | |
162 | * descs are also successfully allocated. | |
163 | */ | |
164 | ||
165 | for (i = 0; i < no_irqs; i++) { | |
166 | if (irq_set_msi_desc_off(irq, i, desc) != 0) { | |
167 | clear_irq_range(pp, irq, i, pos0); | |
168 | goto no_valid_irq; | |
169 | } | |
170 | /*Enable corresponding interrupt in MSI interrupt controller */ | |
171 | if (pp->ops->msi_set_irq) | |
172 | pp->ops->msi_set_irq(pp, pos0 + i); | |
173 | else | |
174 | dw_pcie_msi_set_irq(pp, pos0 + i); | |
175 | } | |
176 | ||
177 | *pos = pos0; | |
178 | desc->nvec_used = no_irqs; | |
179 | desc->msi_attrib.multiple = order_base_2(no_irqs); | |
180 | ||
181 | return irq; | |
182 | ||
183 | no_valid_irq: | |
184 | *pos = pos0; | |
185 | return -ENOSPC; | |
186 | } | |
187 | ||
188 | static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos) | |
189 | { | |
190 | struct msi_msg msg; | |
191 | u64 msi_target; | |
192 | ||
193 | if (pp->ops->get_msi_addr) | |
194 | msi_target = pp->ops->get_msi_addr(pp); | |
195 | else | |
111111a7 | 196 | msi_target = (u64)pp->msi_data; |
feb85d9b KVA |
197 | |
198 | msg.address_lo = (u32)(msi_target & 0xffffffff); | |
199 | msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff); | |
200 | ||
201 | if (pp->ops->get_msi_data) | |
202 | msg.data = pp->ops->get_msi_data(pp, pos); | |
203 | else | |
204 | msg.data = pos; | |
205 | ||
206 | pci_write_msi_msg(irq, &msg); | |
207 | } | |
208 | ||
209 | static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, | |
210 | struct msi_desc *desc) | |
211 | { | |
212 | int irq, pos; | |
213 | struct pcie_port *pp = pdev->bus->sysdata; | |
214 | ||
215 | if (desc->msi_attrib.is_msix) | |
216 | return -EINVAL; | |
217 | ||
218 | irq = assign_irq(1, desc, &pos); | |
219 | if (irq < 0) | |
220 | return irq; | |
221 | ||
222 | dw_msi_setup_msg(pp, irq, pos); | |
223 | ||
224 | return 0; | |
225 | } | |
226 | ||
/*
 * msi_controller .setup_irqs: allocate @nvec vectors for multi-MSI.
 * Compiled out (returns -EINVAL) when CONFIG_PCI_MSI is disabled.
 */
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	/* Multi-MSI devices carry exactly one descriptor for all vectors */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	/* One message covers the whole block; data is the base vector */
	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}
253 | ||
254 | static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) | |
255 | { | |
256 | struct irq_data *data = irq_get_irq_data(irq); | |
257 | struct msi_desc *msi = irq_data_get_msi_desc(data); | |
258 | struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi); | |
259 | ||
260 | clear_irq_range(pp, irq, 1, data->hwirq); | |
261 | } | |
262 | ||
/* msi_controller hooks handed to the PCI core via bridge->msi */
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};
268 | ||
/*
 * irq_domain .map callback: attach the DW MSI chip and the simple flow
 * handler to each vector created by irq_create_mapping() in
 * dw_pcie_host_init().
 */
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
277 | ||
/* Only .map is needed: all mappings are created up-front at host init */
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
281 | ||
282 | int dw_pcie_host_init(struct pcie_port *pp) | |
283 | { | |
284 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | |
285 | struct device *dev = pci->dev; | |
286 | struct device_node *np = dev->of_node; | |
287 | struct platform_device *pdev = to_platform_device(dev); | |
288 | struct pci_bus *bus, *child; | |
295aeb98 | 289 | struct pci_host_bridge *bridge; |
feb85d9b KVA |
290 | struct resource *cfg_res; |
291 | int i, ret; | |
feb85d9b KVA |
292 | struct resource_entry *win, *tmp; |
293 | ||
294 | cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); | |
295 | if (cfg_res) { | |
296 | pp->cfg0_size = resource_size(cfg_res) / 2; | |
297 | pp->cfg1_size = resource_size(cfg_res) / 2; | |
298 | pp->cfg0_base = cfg_res->start; | |
299 | pp->cfg1_base = cfg_res->start + pp->cfg0_size; | |
300 | } else if (!pp->va_cfg0_base) { | |
301 | dev_err(dev, "missing *config* reg space\n"); | |
302 | } | |
303 | ||
295aeb98 LP |
304 | bridge = pci_alloc_host_bridge(0); |
305 | if (!bridge) | |
306 | return -ENOMEM; | |
307 | ||
308 | ret = of_pci_get_host_bridge_resources(np, 0, 0xff, | |
309 | &bridge->windows, &pp->io_base); | |
feb85d9b KVA |
310 | if (ret) |
311 | return ret; | |
312 | ||
295aeb98 | 313 | ret = devm_request_pci_bus_resources(dev, &bridge->windows); |
feb85d9b KVA |
314 | if (ret) |
315 | goto error; | |
316 | ||
317 | /* Get the I/O and memory ranges from DT */ | |
295aeb98 | 318 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { |
feb85d9b KVA |
319 | switch (resource_type(win->res)) { |
320 | case IORESOURCE_IO: | |
321 | ret = pci_remap_iospace(win->res, pp->io_base); | |
322 | if (ret) { | |
323 | dev_warn(dev, "error %d: failed to map resource %pR\n", | |
324 | ret, win->res); | |
325 | resource_list_destroy_entry(win); | |
326 | } else { | |
327 | pp->io = win->res; | |
328 | pp->io->name = "I/O"; | |
329 | pp->io_size = resource_size(pp->io); | |
330 | pp->io_bus_addr = pp->io->start - win->offset; | |
331 | } | |
332 | break; | |
333 | case IORESOURCE_MEM: | |
334 | pp->mem = win->res; | |
335 | pp->mem->name = "MEM"; | |
336 | pp->mem_size = resource_size(pp->mem); | |
337 | pp->mem_bus_addr = pp->mem->start - win->offset; | |
338 | break; | |
339 | case 0: | |
340 | pp->cfg = win->res; | |
341 | pp->cfg0_size = resource_size(pp->cfg) / 2; | |
342 | pp->cfg1_size = resource_size(pp->cfg) / 2; | |
343 | pp->cfg0_base = pp->cfg->start; | |
344 | pp->cfg1_base = pp->cfg->start + pp->cfg0_size; | |
345 | break; | |
346 | case IORESOURCE_BUS: | |
347 | pp->busn = win->res; | |
348 | break; | |
349 | } | |
350 | } | |
351 | ||
352 | if (!pci->dbi_base) { | |
cc7b0d49 LP |
353 | pci->dbi_base = devm_pci_remap_cfgspace(dev, |
354 | pp->cfg->start, | |
355 | resource_size(pp->cfg)); | |
feb85d9b KVA |
356 | if (!pci->dbi_base) { |
357 | dev_err(dev, "error with ioremap\n"); | |
358 | ret = -ENOMEM; | |
359 | goto error; | |
360 | } | |
361 | } | |
362 | ||
363 | pp->mem_base = pp->mem->start; | |
364 | ||
365 | if (!pp->va_cfg0_base) { | |
cc7b0d49 LP |
366 | pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, |
367 | pp->cfg0_base, pp->cfg0_size); | |
feb85d9b KVA |
368 | if (!pp->va_cfg0_base) { |
369 | dev_err(dev, "error with ioremap in function\n"); | |
370 | ret = -ENOMEM; | |
371 | goto error; | |
372 | } | |
373 | } | |
374 | ||
375 | if (!pp->va_cfg1_base) { | |
cc7b0d49 LP |
376 | pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, |
377 | pp->cfg1_base, | |
feb85d9b KVA |
378 | pp->cfg1_size); |
379 | if (!pp->va_cfg1_base) { | |
380 | dev_err(dev, "error with ioremap\n"); | |
381 | ret = -ENOMEM; | |
382 | goto error; | |
383 | } | |
384 | } | |
385 | ||
386 | ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport); | |
387 | if (ret) | |
388 | pci->num_viewport = 2; | |
389 | ||
390 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
391 | if (!pp->ops->msi_host_init) { | |
392 | pp->irq_domain = irq_domain_add_linear(dev->of_node, | |
393 | MAX_MSI_IRQS, &msi_domain_ops, | |
394 | &dw_pcie_msi_chip); | |
395 | if (!pp->irq_domain) { | |
396 | dev_err(dev, "irq domain init failed\n"); | |
397 | ret = -ENXIO; | |
398 | goto error; | |
399 | } | |
400 | ||
401 | for (i = 0; i < MAX_MSI_IRQS; i++) | |
402 | irq_create_mapping(pp->irq_domain, i); | |
403 | } else { | |
404 | ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip); | |
405 | if (ret < 0) | |
406 | goto error; | |
407 | } | |
408 | } | |
409 | ||
4a301766 BA |
410 | if (pp->ops->host_init) { |
411 | ret = pp->ops->host_init(pp); | |
412 | if (ret) | |
413 | goto error; | |
414 | } | |
feb85d9b KVA |
415 | |
416 | pp->root_bus_nr = pp->busn->start; | |
295aeb98 LP |
417 | |
418 | bridge->dev.parent = dev; | |
419 | bridge->sysdata = pp; | |
420 | bridge->busnr = pp->root_bus_nr; | |
421 | bridge->ops = &dw_pcie_ops; | |
60eca198 LP |
422 | bridge->map_irq = of_irq_parse_and_map_pci; |
423 | bridge->swizzle_irq = pci_common_swizzle; | |
feb85d9b | 424 | if (IS_ENABLED(CONFIG_PCI_MSI)) { |
295aeb98 | 425 | bridge->msi = &dw_pcie_msi_chip; |
feb85d9b | 426 | dw_pcie_msi_chip.dev = dev; |
feb85d9b KVA |
427 | } |
428 | ||
295aeb98 LP |
429 | ret = pci_scan_root_bus_bridge(bridge); |
430 | if (ret) | |
431 | goto error; | |
432 | ||
433 | bus = bridge->bus; | |
434 | ||
feb85d9b KVA |
435 | if (pp->ops->scan_bus) |
436 | pp->ops->scan_bus(pp); | |
437 | ||
feb85d9b KVA |
438 | pci_bus_size_bridges(bus); |
439 | pci_bus_assign_resources(bus); | |
440 | ||
441 | list_for_each_entry(child, &bus->children, node) | |
442 | pcie_bus_configure_settings(child); | |
443 | ||
444 | pci_bus_add_devices(bus); | |
445 | return 0; | |
446 | ||
447 | error: | |
295aeb98 | 448 | pci_free_host_bridge(bridge); |
feb85d9b KVA |
449 | return ret; |
450 | } | |
451 | ||
/*
 * Read config space of a device behind the root port by retargeting an
 * outbound iATU viewport at its config space, reading through the mapped
 * window, then restoring the viewport.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Platform-specific accessor bypasses the ATU path entirely */
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	/* Encode bus/device/function into the ATU target address */
	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for devices directly below the root port, CFG1 further down */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	/* Point viewport 1 at the target's config space, then read */
	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	/*
	 * With only two viewports, index 1 is shared between config and I/O
	 * accesses — restore the I/O mapping once the read is done.
	 */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
490 | ||
/*
 * Write config space of a device behind the root port.  Mirror image of
 * dw_pcie_rd_other_conf(): retarget iATU viewport 1, write through the
 * mapped window, then restore the viewport.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Platform-specific accessor bypasses the ATU path entirely */
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	/* Encode bus/device/function into the ATU target address */
	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for devices directly below the root port, CFG1 further down */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	/* Point viewport 1 at the target's config space, then write */
	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	/*
	 * With only two viewports, index 1 is shared between config and I/O
	 * accesses — restore the I/O mapping once the write is done.
	 */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
529 | ||
530 | static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, | |
531 | int dev) | |
532 | { | |
533 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | |
534 | ||
535 | /* If there is no link, then there is no device */ | |
536 | if (bus->number != pp->root_bus_nr) { | |
537 | if (!dw_pcie_link_up(pci)) | |
538 | return 0; | |
539 | } | |
540 | ||
541 | /* access only one slot on each root port */ | |
542 | if (bus->number == pp->root_bus_nr && dev > 0) | |
543 | return 0; | |
544 | ||
545 | return 1; | |
546 | } | |
547 | ||
548 | static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | |
549 | int size, u32 *val) | |
550 | { | |
551 | struct pcie_port *pp = bus->sysdata; | |
552 | ||
553 | if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) { | |
554 | *val = 0xffffffff; | |
555 | return PCIBIOS_DEVICE_NOT_FOUND; | |
556 | } | |
557 | ||
558 | if (bus->number == pp->root_bus_nr) | |
559 | return dw_pcie_rd_own_conf(pp, where, size, val); | |
560 | ||
561 | return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); | |
562 | } | |
563 | ||
564 | static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | |
565 | int where, int size, u32 val) | |
566 | { | |
567 | struct pcie_port *pp = bus->sysdata; | |
568 | ||
569 | if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) | |
570 | return PCIBIOS_DEVICE_NOT_FOUND; | |
571 | ||
572 | if (bus->number == pp->root_bus_nr) | |
573 | return dw_pcie_wr_own_conf(pp, where, size, val); | |
574 | ||
575 | return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); | |
576 | } | |
577 | ||
/* Config accessors handed to the PCI core via bridge->ops */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
582 | ||
583 | static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) | |
584 | { | |
585 | u32 val; | |
586 | ||
587 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); | |
588 | if (val == 0xffffffff) | |
589 | return 1; | |
590 | ||
591 | return 0; | |
592 | } | |
593 | ||
/*
 * Program the Root Complex's standard config header and — unless the
 * platform supplies its own translation via ->rd_other_conf — the default
 * outbound iATU windows.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* setup interrupt pins: byte 1 of this dword (Interrupt Pin) <- 1 */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* setup bus numbers: primary 0, secondary 1, subordinate 0xff */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		/* Viewport 0 maps CPU MEM space to PCI bus addresses */
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		/* With more than two viewports, I/O gets its own window */
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	/*
	 * Set the speed-change bit (NOTE(review): semantics inferred from
	 * the macro name — confirm against the DWC databook).
	 */
	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}