/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>

#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/debugfs.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>

#include "../../../../drivers/pci/pci.h"
#define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs		*/
#define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR		*/
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000

static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
					      "NPU_OCAPI" };
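/*
 * A PE (Partitionable Endpoint) is the IODA unit of isolation: a device,
 * a bus, or a bus subtree that shares one set of DMA windows, MSI
 * resources and EEH freeze state. The helper below prints messages
 * prefixed with the PE number and an address for the device(s) the PE
 * contains.
 */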
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}
static bool pnv_iommu_bypass_disabled __read_mostly;
static bool pci_reset_phbs __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

static int __init pci_reset_phbs_setup(char *str)
{
	pci_reset_phbs = true;
	return 0;
}
early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);
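/*
 * Both options above are consumed from the kernel command line at early
 * boot. For example, booting with "iommu=nobypass" keeps every PE behind
 * its 32-bit TCE table, while "ppc_pci_reset_phbs" requests a full PHB
 * reset during probe (the consumer of that flag is outside this excerpt).
 */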
static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
{
	/*
	 * WARNING: We cannot rely on the resource flags. The Linux PCI
	 * allocation code sometimes decides to put a 64-bit prefetchable
	 * BAR in the 32-bit window, so we have to compare the addresses.
	 *
	 * For simplicity we only test resource start.
	 */
	return (r->start >= phb->ioda.m64_base &&
		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}

static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
{
	unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);

	return (resource_flags & flags) == flags;
}
static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	s64 rc;

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;

	/*
	 * Clear the PE frozen state as it might have been put into frozen
	 * state by the last PCI remove path. It's harmless to do this when
	 * the PE is already unfrozen.
	 */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);

	return &phb->ioda.pe_array[pe_no];
}
static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %x on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %x was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);

	pnv_ioda_init_pe(phb, pe_no);
}
static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	long pe;

	/* Allocate from the top so low PE#s stay free for M64 segments */
	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
			return pnv_ioda_init_pe(phb, pe);
	}

	return NULL;
}

static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	unsigned int pe_num = pe->pe_number;

	WARN_ON(pe->pdev);

	memset(pe, 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe_num, phb->ioda.pe_alloc);
}
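/*
 * M64 handling. The M64 window is the PHB's 64-bit prefetchable MMIO
 * window. It is divided into total_pe_num equal segments and, on PHB3,
 * segment N is hardwired to PE#N, so placing a BAR in a given segment
 * effectively chooses the device's PE number. The functions below set
 * the window up and reserve the PE numbers implied by M64 usage.
 */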
/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are the first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		pr_warn("  Cannot strip M64 segment for reserved PE#%x\n",
			phb->ioda.reserved_pe_idx);

	return 0;

fail:
	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}
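/*
 * Illustration (example numbers, not read from hardware): with a 64GB
 * M64 window and 256 PEs, m64_segsize is 256MB. A device BAR covering
 * the first 768MB of the window then spans segments 0..2, so the loop
 * below reserves PE#0..PE#2 for it.
 */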
static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					unsigned long *pe_bitmap)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct resource *r;
	resource_size_t base, sgsz, start, end;
	int segno, i;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;

		start = _ALIGN_DOWN(r->start - base, sgsz);
		end = _ALIGN_UP(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
			if (pe_bitmap)
				set_bit(segno, pe_bitmap);
			else
				pnv_ioda_reserve_pe(phb, segno);
		}
	}
}
static int pnv_ioda1_init_m64(struct pnv_phb *phb)
{
	struct resource *r;
	int index;

	/*
	 * There are 16 M64 BARs, each of which has 8 segments. So
	 * there are as many M64 segments as the maximum number of
	 * PEs, which is 128.
	 */
	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
		unsigned long base, segsz = phb->ioda.m64_segsize;
		int64_t rc;

		base = phb->ioda.m64_base +
		       index * PNV_IODA1_M64_SEGS * segsz;
		rc = opal_pci_set_phb_mem_window(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index, base, 0,
				PNV_IODA1_M64_SEGS * segsz);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld setting M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}

		rc = opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index,
				OPAL_ENABLE_M64_SPLIT);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld enabling M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}
	}

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are the first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
		     phb->ioda.reserved_pe_idx, phb->hose->global_number);

	return 0;

fail:
	for ( ; index >= 0; index--)
		opal_pci_phb_mmio_enable(phb->opal_id,
			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);

	return -EIO;
}
static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
				    bool all)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,
						pe_bitmap, all);
	}
}
static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	int i;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return NULL;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return NULL;
	}

	/* Figure out the PE numbers reserved for the PE */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own any M64 window; it might all be
	 * contributed by its child buses. In that case, we needn't pick
	 * an M64 dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		kfree(pe_alloc);
		return NULL;
	}

	/*
	 * Figure out the master PE and put all slave PEs on the master
	 * PE's list to form a compound PE.
	 */
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}

		/*
		 * P7IOC supports M64DT, which helps mapping an M64 segment
		 * to one particular PE#. However, PHB3 has a fixed mapping
		 * between M64 segment and PE#. In order to have the same
		 * logic for P7IOC and PHB3, we enforce the fixed mapping
		 * between M64 segment and PE# on P7IOC as well.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			int64_t rc;

			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M64_WINDOW_TYPE,
					pe->pe_number / PNV_IODA1_M64_SEGS,
					pe->pe_number % PNV_IODA1_M64_SEGS);
			if (rc != OPAL_SUCCESS)
				pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
					__func__, rc, phb->hose->global_number,
					pe->pe_number);
		}
	}

	kfree(pe_alloc);
	return master_pe;
}
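/*
 * The "ibm,opal-m64-window" property, as consumed below, is six cells:
 * the window's PCI address (2 cells, read at r), its parent address
 * (2 cells, translated at r + 2) and its size (2 cells, read at r + 4).
 */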
static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	u32 m64_range[2], i;
	const __be32 *r;
	u64 pci_addr;

	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window not supported\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %pOF\n",
			dn);
		return;
	}

	/*
	 * Find the available M64 BAR range and pick the last one for
	 * covering the whole 64-bit space. We support only one range.
	 */
	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
				       m64_range, 2)) {
		/* In absence of the property, assume 0..15 */
		m64_range[0] = 0;
		m64_range[1] = 16;
	}
	/* We only support 64 bits in our allocator */
	if (m64_range[1] > 63) {
		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
			__func__, m64_range[1], phb->hose->global_number);
		m64_range[1] = 63;
	}
	/* Empty range, no m64 */
	if (m64_range[1] <= m64_range[0]) {
		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
			__func__, phb->hose->global_number);
		return;
	}

	/* Configure M64 information */
	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
		res->start, res->end, pci_addr, m64_range[0],
		m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
	phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR to cover M64 window */
	m64_range[1]--;
	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];

	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
	for (i = m64_range[0]; i < m64_range[1]; i++)
		clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version; IODA3 uses
	 * the IODA2 code.
	 */
	if (phb->type == PNV_PHB_IODA1)
		phb->init_m64 = pnv_ioda1_init_m64;
	else
		phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda_pick_m64_pe;
}
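/*
 * EEH freeze handling. When the hardware detects an error it freezes the
 * offending PE (MMIO and DMA are blocked until the state is cleared).
 * For compound PEs the freeze/unfreeze operations below are applied to
 * the master PE first and then propagated to every slave.
 */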
static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}
static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; note the PE instance might not have been
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate, &pcierr, NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * severity.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}
/*
 * Currently this is only used when MSIs are enabled; this will change,
 * but in the meantime we need to guard it to avoid unused warnings.
 */
#ifdef CONFIG_PCI_MSI
struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */
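/*
 * The PELT-V (PE Lookup Table Vector) of a PE lists the other PEs that
 * must be frozen together with it when it takes an error. The helper
 * below adds (or removes) "child" - and, for compound PEs, its slaves -
 * to/from "parent"'s PELT-V.
 */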
static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}
static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear the PE frozen state. If it's a master PE, we need to
	 * clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
					slave->pe_number,
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate the PE in PELT. We also need to add the PE to its
	 * corresponding PELT-V; otherwise, an error originating from the
	 * PE might contribute to other PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}
static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we only deconfigure VF PEs. Bus PEs will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/* Release from all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld removing self from PELTV\n", rc);
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
#ifdef CONFIG_PCI_IOV
	pe->parent_dev = NULL;
#endif

	return 0;
}
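/*
 * The bus-number comparison mask passed to OPAL encodes how many bus
 * numbers a PE spans: for example, a PNV_IODA_PE_BUS_ALL PE covering 8
 * subordinate buses uses OpalPciBus5Bits, i.e. only the top 5 bits of
 * the bus number are compared, so 8 consecutive buses match the PE's RID.
 */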
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in PELT. We also need to add the PE to its
	 * corresponding PELT-V; otherwise, an error originating from the
	 * PE might contribute to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}
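/*
 * Worked example for the IOV BAR shift below (illustrative values): if
 * each VF BAR segment is 1MB and the first VF was assigned PE#64 (so
 * offset = 64), the PF's IOV BAR start is moved up by 64MB, which places
 * VF0's BAR in M64 segment 64 and therefore in PE#64.
 */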
#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR. This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * Since the M64 BAR shares segments among all possible 256 PEs,
	 * we have to shift the beginning of the PF IOV BAR to make it start
	 * from the segment which belongs to the PE number assigned to the
	 * first VF. This creates a "hole" in /proc/iomem which could be
	 * used for allocating other resources, so we reserve this area
	 * below and release it when IOV is released.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
			 num_vfs, offset);

		if (offset < 0) {
			devm_release_resource(&dev->dev, &pdn->holes[i]);
			memset(&pdn->holes[i], 0, sizeof(pdn->holes[i]));
		}

		pci_update_resource(dev, i + PCI_IOV_RESOURCES);

		if (offset > 0) {
			pdn->holes[i].start = res2.start;
			pdn->holes[i].end = res2.start + size * offset - 1;
			pdn->holes[i].flags = IORESOURCE_BUS;
			pdn->holes[i].name = "pnv_iov_reserved";
			devm_request_resource(&dev->dev, res->parent,
					&pdn->holes[i]);
		}
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */
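/*
 * Per-device PE setup: allocate a PE, point the device's pci_dn at it,
 * compute the RID (bus << 8 | devfn) and push the mapping into OPAL via
 * pnv_ioda_configure_pe() before adding the PE to the PHB's list.
 */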
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	pe = pnv_ioda_alloc_pe(phb);
	if (!pe) {
		pr_warn("%s: Not enough PE# available, disabling device\n",
			pci_name(dev));
		return NULL;
	}

	/* NOTE: We only get one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure; both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyway
	 */
	pci_dev_get(dev);
	pdn->pe_number = pe->pe_number;
	pe->flags = PNV_IODA_PE_DEV;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Put PE on the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}

		/*
		 * In the partial hotplug case, the PCI device might still
		 * be associated with the PE and needn't be attached to the
		 * PE again.
		 */
		if (pdn->pe_number != IODA_INVALID_PE)
			continue;

		pe->device_count++;
		pdn->pe_number = pe->pe_number;
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}
/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus; the other contains the primary PCI bus plus its subordinate
 * PCI devices and buses. The second type is normally created for a
 * PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe = NULL;
	unsigned int pe_num;

	/*
	 * In the partial hotplug case, the PE instance might still be
	 * alive. We should reuse it instead of allocating a new one.
	 */
	pe_num = phb->ioda.pe_rmap[bus->number << 8];
	if (pe_num != IODA_INVALID_PE) {
		pe = &phb->ioda.pe_array[pe_num];
		pnv_ioda_setup_same_PE(bus, pe);
		return NULL;
	}

	/* PE number for root bus should have been reserved */
	if (pci_is_root_bus(bus) &&
	    phb->ioda.root_pe_idx != IODA_INVALID_PE)
		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if the PE is determined by M64 */
	if (!pe && phb->pick_m64_pe)
		pe = phb->pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (!pe)
		pe = pnv_ioda_alloc_pe(phb);

	if (!pe) {
		pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);
		return NULL;
	}

	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
			bus->busn_res.start, bus->busn_res.end, pe->pe_number);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%x\n",
			bus->busn_res.start, pe->pe_number);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pe->pbus = NULL;
		return NULL;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE on the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}
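/*
 * NPU (NVLink) links are represented as PCI devices and get a PE each,
 * just like real devices. The constraint applied below is that links
 * attached to the same GPU must share one PE.
 */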
static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
{
	int pe_num, found_pe = false, rc;
	long rid;
	struct pnv_ioda_pe *pe;
	struct pci_dev *gpu_pdev;
	struct pci_dn *npu_pdn;
	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/*
	 * Due to a hardware erratum PE#0 on the NPU is reserved for
	 * error handling. This means we only have three PEs remaining
	 * which need to be assigned to four links, implying some
	 * links must share PEs.
	 *
	 * To achieve this we assign PEs such that NPUs linking the
	 * same GPU get assigned the same PE.
	 */
	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		pe = &phb->ioda.pe_array[pe_num];
		if (!pe->pdev)
			continue;

		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
			/*
			 * This device has the same peer GPU so should
			 * be assigned the same PE as the existing
			 * peer NPU.
			 */
			dev_info(&npu_pdev->dev,
				"Associating to existing PE %x\n", pe_num);
			pci_dev_get(npu_pdev);
			npu_pdn = pci_get_pdn(npu_pdev);
			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
			npu_pdn->pe_number = pe_num;
			phb->ioda.pe_rmap[rid] = pe->pe_number;

			/* Map the PE to this link */
			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
					OpalPciBusAll,
					OPAL_COMPARE_RID_DEVICE_NUMBER,
					OPAL_COMPARE_RID_FUNCTION_NUMBER,
					OPAL_MAP_PE);
			WARN_ON(rc != OPAL_SUCCESS);
			found_pe = true;
			break;
		}
	}

	if (!found_pe)
		/*
		 * Could not find an existing PE so allocate a new
		 * one.
		 */
		return pnv_ioda_setup_dev_PE(npu_pdev);
	else
		return pe;
}
static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list)
		pnv_ioda_setup_npu_PE(pdev);
}

static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	struct pci_bus *bus;
	struct pci_dev *pdev;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		if (phb->type == PNV_PHB_NPU_NVLINK) {
			/* PE#0 is needed for error reporting */
			pnv_ioda_reserve_pe(phb, 0);
			pnv_ioda_setup_npu_PEs(hose->bus);
			if (phb->model == PNV_PHB_MODEL_NPU2)
				pnv_npu2_init(phb);
		}
		if (phb->type == PNV_PHB_NPU_OCAPI) {
			bus = hose->bus;
			list_for_each_entry(pdev, &bus->devices, bus_list)
				pnv_ioda_setup_dev_PE(pdev);
		}
	}
}
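/*
 * SR-IOV M64 handling. Two layouts are supported below: in single-PE
 * mode (pdn->m64_single_mode) one M64 BAR is consumed per VF and mapped
 * directly to that VF's PE, while in shared mode one segmented M64 BAR
 * covers the whole IOV BAR and the segment index selects the PE. (The
 * 2 vs 1 arguments to opal_pci_phb_mmio_enable() appear to select the
 * corresponding window mode.)
 */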
#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int i, j;
	int m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < m64_bars; j++) {
			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
			pdn->m64_map[j][i] = IODA_INVALID_M64;
		}

	kfree(pdn->m64_map);
	return 0;
}
static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	unsigned int win;
	struct resource *res;
	int i, j;
	int64_t rc;
	int total_vfs;
	resource_size_t size, start;
	int pe_num;
	int m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	pdn->m64_map = kmalloc_array(m64_bars,
				     sizeof(*pdn->m64_map),
				     GFP_KERNEL);
	if (!pdn->m64_map)
		return -ENOMEM;
	/* Initialize the m64_map to IODA_INVALID_M64 */
	for (i = 0; i < m64_bars; i++)
		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
			pdn->m64_map[i][j] = IODA_INVALID_M64;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		for (j = 0; j < m64_bars; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_map[j][i] = win;

			if (pdn->m64_single_mode) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_single_mode) {
				pe_num = pdn->pe_num_map[j];
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_map[j][i], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 pdn->m64_map[j][i],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_single_mode)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
	return -EBUSY;
}
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num);

static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl;
	int64_t rc;

	tbl = pe->table_group.tables[0];
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %ld releasing DMA window\n", rc);

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		BUG_ON(pe->table_group.group);
	}
	iommu_tce_table_put(tbl);
}
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;
	struct pci_dn *pdn;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_dma_pe(pdev, pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(pe);
	}
}
void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;
	u16 num_vfs, i;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->m64_single_mode)
			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev, num_vfs);

		/* Release PE numbers */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				if (pdn->pe_num_map[i] == IODA_INVALID_PE)
					continue;

				pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
				pnv_ioda_free_pe(pe);
			}
		} else
			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
		/* Release pe_num_map */
		kfree(pdn->pe_num_map);
	}
}
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	int vf_index;
	struct pci_dn *pdn;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	/* Reserve a PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		if (pdn->m64_single_mode)
			pe_num = pdn->pe_num_map[vf_index];
		else
			pe_num = *pdn->pe_num_map + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->mve_number = -1;
		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
			   pci_iov_virtfn_devfn(pdev, vf_index);

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			pnv_ioda_free_pe(pe);
			pe->pdev = NULL;
			continue;
		}

		/* Put PE on the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}
}
int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;
	int ret;
	u16 i;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->vfs_expanded) {
			dev_info(&pdev->dev,
				 "SR-IOV device with non-64-bit-prefetchable IOV BAR is not supported\n");
			return -ENOSPC;
		}

		/*
		 * When M64 BARs function in Single PE mode, the number of
		 * VFs that can be enabled must be less than the number of
		 * M64 BARs.
		 */
		if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
			dev_info(&pdev->dev, "Not enough M64 BARs for VFs\n");
			return -EBUSY;
		}

		/* Allocate pe_num_map */
		if (pdn->m64_single_mode)
			pdn->pe_num_map = kmalloc_array(num_vfs,
							sizeof(*pdn->pe_num_map),
							GFP_KERNEL);
		else
			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);

		if (!pdn->pe_num_map)
			return -ENOMEM;

		if (pdn->m64_single_mode)
			for (i = 0; i < num_vfs; i++)
				pdn->pe_num_map[i] = IODA_INVALID_PE;

		/* Calculate available PEs for the required VFs */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				pe = pnv_ioda_alloc_pe(phb);
				if (!pe) {
					ret = -EBUSY;
					goto m64_failed;
				}

				pdn->pe_num_map[i] = pe->pe_number;
			}
		} else {
			mutex_lock(&phb->ioda.pe_alloc_mutex);
			*pdn->pe_num_map = bitmap_find_next_zero_area(
				phb->ioda.pe_alloc, phb->ioda.total_pe_num,
				0, num_vfs, 0);
			if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
				mutex_unlock(&phb->ioda.pe_alloc_mutex);
				dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
				kfree(pdn->pe_num_map);
				return -EBUSY;
			}
			bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
			mutex_unlock(&phb->ioda.pe_alloc_mutex);
		}
		pdn->num_vfs = num_vfs;

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
		if (ret) {
			dev_info(&pdev->dev, "Not enough M64 window resources\n");
			goto m64_failed;
		}

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (!pdn->m64_single_mode) {
			ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
			if (ret)
				goto m64_failed;
		}
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

m64_failed:
	if (pdn->m64_single_mode) {
		for (i = 0; i < num_vfs; i++) {
			if (pdn->pe_num_map[i] == IODA_INVALID_PE)
				continue;

			pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
			pnv_ioda_free_pe(pe);
		}
	} else
		bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);

	/* Release pe_num_map */
	kfree(pdn->pe_num_map);

	return ret;
}
int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_dev_pci_data(pdev);
	return 0;
}

int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_dev_pci_data(pdev);

	return pnv_pci_sriov_enable(pdev, num_vfs);
}
#endif /* CONFIG_PCI_IOV */
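/*
 * DMA setup. Each PE gets a 32-bit TCE-translated DMA window and, where
 * possible, a direct-map "bypass" window at tce_bypass_base for 64-bit
 * capable devices; the helpers below wire a device to whichever of the
 * two its DMA mask allows.
 */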
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * This function can be called before the PE# has been assigned;
	 * do nothing in that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	/*
	 * Note: iommu_add_device() would fail here:
	 * for a physical PE the device has already been added by now;
	 * for a virtual PE the sysfs entries are not ready yet and
	 * tce_iommu_bus_notifier will add the device to a group later.
	 */
}
static bool pnv_pci_ioda_pe_single_vendor(struct pnv_ioda_pe *pe)
{
	unsigned short vendor = 0;
	struct pci_dev *pdev;

	if (pe->device_count == 1)
		return true;

	/* pe->pdev should be set if it's a single device, pe->pbus if not */
	if (!pe->pbus)
		return true;

	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
		if (!vendor) {
			vendor = pdev->vendor;
			continue;
		}

		if (pdev->vendor != vendor)
			return false;
	}

	return true;
}
/*
 * Reconfigure TVE#0 to be usable as 64-bit DMA space.
 *
 * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
 * Devices can only access more than that if bit 59 of the PCI address is set
 * by hardware, which indicates TVE#1 should be used instead of TVE#0.
 * Many PCI devices are not capable of addressing that many bits, and as a
 * result are limited to the 4GB of virtual memory made available to 32-bit
 * devices.
 *
 * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit
 * devices by mapping system memory at a 4GB offset, leaving the first 4GB
 * of the DMA space unmapped. This should only be used by devices that want
 * more than 4GB, and only on PEs that have no 32-bit devices.
 *
 * Currently this will only work on PHB3 (POWER8).
 */
static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
{
	u64 window_size, table_size, tce_count, addr;
	struct page *table_pages;
	u64 tce_order = 28;		/* 256MB TCEs */
	__be64 *tces;
	s64 rc;

	/*
	 * Window size needs to be a power of two, but needs to account for
	 * shifting memory by the 4GB offset required to skip 32bit space.
	 */
	window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32));
	tce_count = window_size >> tce_order;
	table_size = tce_count << 3;

	if (table_size < PAGE_SIZE)
		table_size = PAGE_SIZE;

	table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
				       get_order(table_size));
	if (!table_pages)
		goto err;

	tces = page_address(table_pages);
	if (!tces)
		goto err;

	memset(tces, 0, table_size);

	for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) {
		tces[(addr + (1ULL << 32)) >> tce_order] =
			cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE);
	}

	rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
					pe->pe_number,
					/* reconfigure window 0 */
					(pe->pe_number << 1) + 0,
					1,
					__pa(tces),
					table_size,
					1 << tce_order);
	if (rc == OPAL_SUCCESS) {
		pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
		return 0;
	}
err:
	pe_err(pe, "Error configuring 64-bit DMA bypass\n");
	return -EIO;
}
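/*
 * Sizing example for the TVE#0 trick above (illustrative numbers): with
 * memory_hotplug_max() = 64GB, window_size rounds 64GB + 4GB up to
 * 128GB, giving 512 TCEs of 256MB each, i.e. a 4KB table (then padded to
 * PAGE_SIZE). RAM is mapped at a 4GB offset, so the device must be able
 * to address memory_hotplug_max() + 4GB - which is exactly what the DMA
 * mask check in the caller below verifies.
 */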
static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;
	s64 rc;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_nommu_ops);
	} else {
		/*
		 * If the device can't set the TCE bypass bit but still wants
		 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
		 * bypass the 32-bit region and be usable for 64-bit DMAs.
		 * The device needs to be able to address all of this space.
		 */
		if (dma_mask >> 32 &&
		    dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
		    pnv_pci_ioda_pe_single_vendor(pe) &&
		    phb->model == PNV_PHB_MODEL_PHB3) {
			/* Configure the bypass mode */
			rc = pnv_pci_ioda_dma_64bit_bypass(pe);
			if (rc)
				return rc;
			/* 4GB offset bypasses 32-bit space */
			set_dma_offset(&pdev->dev, (1ULL << 32));
			set_dma_ops(&pdev->dev, &dma_nommu_ops);
		} else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) {
			/*
			 * Fail the request if a DMA mask between 32 and 64 bits
			 * was requested but couldn't be fulfilled. Ideally we
			 * would do this for 64-bits but historically we have
			 * always fallen back to 32-bits.
			 */
			return -ENOMEM;
		} else {
			dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
			set_dma_ops(&pdev->dev, &dma_iommu_ops);
		}
	}
	*pdev->dev.dma_mask = dma_mask;

	/* Update peer npu devices */
	pnv_npu_try_dma_set_bypass(pdev, bypass);

	return 0;
}
static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	u64 end, mask;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return 0;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (!pe->tce_bypass_enabled)
		return __dma_get_required_mask(&pdev->dev);

	end = pe->tce_bypass_base + memblock_end_of_DRAM();
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
				   struct pci_bus *bus,
				   bool add_to_group)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
		set_dma_offset(&dev->dev, pe->tce_bypass_base);
		if (add_to_group)
			iommu_add_device(&dev->dev);

		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate,
					add_to_group);
	}
}

static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
						     bool real_mode)
{
	return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
		(phb->regs + 0x210);
}
static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl = list_first_entry_or_null(
			&tbl->it_group_list, struct iommu_table_group_link,
			next);
	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
			struct pnv_ioda_pe, table_group);
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
			npages - 1);

	/* p7ioc-style invalidation, 2 TCEs per write */
	start |= (1ull << 63);
	end |= (1ull << 63);
	inc = 16;
	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq_be(start, invalidate);
		else
			__raw_writeq_be(start, invalidate);

		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);

	return ret;
}
#ifdef CONFIG_IOMMU_API
static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);

	if (!ret)
		pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);

	return ret;
}

static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);

	if (!ret)
		pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);

	return ret;
}
#endif

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
	.set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
	.exchange = pnv_ioda1_tce_xchg,
	.exchange_rm = pnv_ioda1_tce_xchg_rm,
	.useraddrptr = pnv_tce_useraddrptr,
#endif
	.clear = pnv_ioda1_tce_free,
	.get = pnv_tce_get,
};
#define PHB3_TCE_KILL_INVAL_ALL		PPC_BIT(0)
#define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE		PPC_BIT(2)
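/*
 * PHB3 "TCE kill" register: the top bits select the invalidation scope
 * (everything / one PE / one DMA address range) per the flags above. The
 * helpers below also take an "rm" flag since they may run in real mode,
 * where the register must be reached through its physical address.
 */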
static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
	const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;

	mb(); /* Ensure previous TCE table stores are visible */
	if (rm)
		__raw_rm_writeq_be(val, invalidate);
	else
		__raw_writeq_be(val, invalidate);
}

static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	/* 01xb - invalidate TCEs that match the specified PE# */
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
	unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);

	mb(); /* Ensure above stores are visible */
	__raw_writeq_be(val, invalidate);
}

static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
					unsigned shift, unsigned long index,
					unsigned long npages)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	/* We'll invalidate DMA addresses in PE scope */
	start = PHB3_TCE_KILL_INVAL_ONE;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq_be(start, invalidate);
		else
			__raw_writeq_be(start, invalidate);
		start += inc;
	}
}

static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
		pnv_pci_phb3_tce_invalidate_pe(pe);
	else
		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
				  pe->pe_number, 0, 0, 0);
}
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl;

	list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
				struct pnv_ioda_pe, table_group);
		struct pnv_phb *phb = pe->phb;
		unsigned int shift = tbl->it_page_shift;

		/*
		 * NVLink1 can use the TCE kill register directly as
		 * it's the same as PHB3. NVLink2 is different and
		 * should go via the OPAL call.
		 */
		if (phb->model == PNV_PHB_MODEL_NPU) {
			/*
			 * The NVLink hardware does not support TCE kill
			 * per TCE entry so we have to invalidate
			 * the entire cache for it.
			 */
			pnv_pci_phb3_tce_invalidate_entire(phb, rm);
			continue;
		}
		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
			pnv_pci_phb3_tce_invalidate(pe, rm, shift,
						    index, npages);
		else
			opal_pci_tce_kill(phb->opal_id,
					  OPAL_PCI_TCE_KILL_PAGES,
					  pe->pe_number, 1u << shift,
					  index << shift, npages);
	}
}

void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
	if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3)
		pnv_pci_phb3_tce_invalidate_entire(phb, rm);
	else
		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0);
}
static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

	return ret;
}

#ifdef CONFIG_IOMMU_API
static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);

	return ret;
}

static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);

	return ret;
}
#endif

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
	.exchange = pnv_ioda2_tce_xchg,
	.exchange_rm = pnv_ioda2_tce_xchg_rm,
	.useraddrptr = pnv_tce_useraddrptr,
#endif
	.clear = pnv_ioda2_tce_free,
	.get = pnv_tce_get,
	.free = pnv_pci_ioda2_table_free_pages,
};
static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
{
	unsigned int *weight = (unsigned int *)data;

	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		*weight += 3;
	else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		*weight += 15;
	else
		*weight += 10;

	return 0;
}

static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
{
	unsigned int weight = 0;

	/* SRIOV VF has same DMA32 weight as its PF */
#ifdef CONFIG_PCI_IOV
	if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
		pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
		return weight;
	}
#endif

	if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
		pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
		struct pci_dev *pdev;

		list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
			pnv_pci_ioda_dev_dma_weight(pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
		pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
	}

	return weight;
}
2260 static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
2261 struct pnv_ioda_pe *pe)
2264 struct page *tce_mem = NULL;
2265 struct iommu_table *tbl;
2266 unsigned int weight, total_weight = 0;
2267 unsigned int tce32_segsz, base, segs, avail, i;
2271 /* XXX FIXME: Handle 64-bit only DMA devices */
2272 /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
2273 /* XXX FIXME: Allocate multi-level tables on PHB3 */
2274 weight = pnv_pci_ioda_pe_dma_weight(pe);
2278 pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
2280 segs = (weight * phb->ioda.dma32_count) / total_weight;
2285 * Allocate contiguous DMA32 segments. We begin with the expected
2286 * number of segments. With one more attempt, the number of DMA32
2287 * segments to be allocated is decreased by one until one segment
2288 * is allocated successfully.
2291 for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
2292 for (avail = 0, i = base; i < base + segs; i++) {
2293 if (phb->ioda.dma32_segmap[i] ==
2304 pe_warn(pe, "No available DMA32 segments\n");
2309 tbl = pnv_pci_table_alloc(phb->hose->node);
2313 iommu_register_group(&pe->table_group, phb->hose->global_number,
2315 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
2317 /* Grab a 32-bit TCE table */
2318 pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
2319 weight, total_weight, base, segs);
2320 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
2321 base * PNV_IODA1_DMA32_SEGSIZE,
2322 (base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);
2324 /* XXX Currently, we allocate one big contiguous table for the
2325 * TCEs. We only really need one chunk per 256M of TCE space
2326 * (ie per segment) but that's an optimization for later, it
2327 * requires some added smarts with our get/put_tce implementation
2329 * Each TCE page is 4KB in size and each TCE entry occupies 8
2332 tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
2333 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
2334 get_order(tce32_segsz * segs));
2336 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
2339 addr = page_address(tce_mem);
2340 memset(addr, 0, tce32_segsz * segs);
2343 for (i = 0; i < segs; i++) {
2344 rc = opal_pci_map_pe_dma_window(phb->opal_id,
2347 __pa(addr) + tce32_segsz * i,
2348 tce32_segsz, IOMMU_PAGE_SIZE_4K);
2350 pe_err(pe, " Failed to configure 32-bit TCE table,"
2356 /* Setup DMA32 segment mapping */
2357 for (i = base; i < base + segs; i++)
2358 phb->ioda.dma32_segmap[i] = pe->pe_number;
2360 /* Setup linux iommu table */
2361 pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
2362 base * PNV_IODA1_DMA32_SEGSIZE,
2363 IOMMU_PAGE_SHIFT_4K);
2365 tbl->it_ops = &pnv_ioda1_iommu_ops;
2366 pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
2367 pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
2368 iommu_init_table(tbl, phb->hose->node);
2370 if (pe->flags & PNV_IODA_PE_DEV) {
		/*
		 * Setting table base here only for carrying iommu_group
		 * further down to let iommu_add_device() do the job.
		 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
		 */
2376 set_iommu_table_base(&pe->pdev->dev, tbl);
2377 iommu_add_device(&pe->pdev->dev);
2378 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2379 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
	return;
fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce32_segsz * segs));
	if (tbl) {
		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
		iommu_tce_table_put(tbl);
	}
}
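/*
 * Sizing sketch (illustrative, not from the original source): with 4K
 * IOMMU pages, one 256MB DMA32 segment covers 0x10000000 / 0x1000 = 65536
 * TCEs, and at 8 bytes per TCE that is tce32_segsz = 0x10000000 >> (12 - 3)
 * = 512KB of table per segment, allocated above as a single physically
 * contiguous block of tce32_segsz * segs bytes.
 */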
2392 static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	int64_t rc;
2399 const unsigned long size = tbl->it_indirect_levels ?
2400 tbl->it_level_size : tbl->it_size;
2401 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
2402 const __u64 win_size = tbl->it_size << tbl->it_page_shift;
2404 pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num,
2405 start_addr, start_addr + win_size - 1,
2406 IOMMU_PAGE_SIZE(tbl));
	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bits DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			pe->pe_number,
			(pe->pe_number << 1) + num,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);
		return rc;
	}

	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &pe->table_group);
	pnv_pci_ioda2_tce_invalidate_pe(pe);

	return 0;
}
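/*
 * Illustrative mapping (not from the original source): each IODA2 PE owns
 * a pair of TVEs, so PE# 5 programs TVE index (5 << 1) + 0 = 10 for
 * window 0 (32-bit DMA) and TVE index 11 for window 1, the 64-bit bypass
 * window configured below.
 */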
void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;

	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}
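/*
 * Worked example (illustrative): on a machine with 70GB of RAM,
 * memblock_end_of_DRAM() is rounded up to a 128GB window, so PCI
 * addresses 1ull << 59 .. (1ull << 59) + 128GB - 1 map linearly onto
 * system memory with no TCE translation.
 */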
2459 static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
2460 int num, __u32 page_shift, __u64 window_size, __u32 levels,
		bool alloc_userspace_copy, struct iommu_table **ptbl)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	int nid = pe->phb->hose->node;
	__u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
	long ret;
	struct iommu_table *tbl;

	tbl = pnv_pci_table_alloc(nid);
	if (!tbl)
		return -ENOMEM;

	tbl->it_ops = &pnv_ioda2_iommu_ops;

	ret = pnv_pci_ioda2_table_alloc_pages(nid,
			bus_offset, page_shift, window_size,
			levels, alloc_userspace_copy, tbl);
	if (ret) {
		iommu_tce_table_put(tbl);
		return ret;
	}

	*ptbl = tbl;

	return 0;
}
static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = NULL;
	long rc;

	/*
	 * crashkernel= specifies the kdump kernel's maximum memory at
	 * some offset and there is no guarantee the result is a power
	 * of 2, which will cause errors later.
	 */
2499 const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
	/*
	 * In memory constrained environments, e.g. kdump kernel, the
	 * DMA window can be larger than available memory, which will
	 * cause errors later.
	 */
2506 const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
2508 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
			IOMMU_PAGE_SHIFT_4K,
			window_size,
			POWERNV_IOMMU_DEFAULT_LEVELS, false, &tbl);
	if (rc) {
		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
				rc);
		return rc;
	}
2518 iommu_init_table(tbl, pe->phb->hose->node);
2520 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
				rc);
		iommu_tce_table_put(tbl);
		return rc;
	}
2528 if (!pnv_iommu_bypass_disabled)
2529 pnv_pci_ioda2_set_bypass(pe, true);
	/*
	 * Setting table base here only for carrying iommu_group
	 * further down to let iommu_add_device() do the job.
	 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
	 */
	if (pe->flags & PNV_IODA_PE_DEV)
		set_iommu_table_base(&pe->pdev->dev, tbl);

	return 0;
}
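/*
 * Illustrative numbers (not from the original source): in a kdump kernel
 * where memory_hotplug_max() reports only 512MB, a PHB advertising a 2GB
 * tce32_size still gets window_size = 512MB here, keeping the default
 * TCE table no larger than the memory it could ever map.
 */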
2542 #if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	long ret;
2551 pe_info(pe, "Removing DMA window #%d\n", num);
2553 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2554 (pe->pe_number << 1) + num,
2555 0/* levels */, 0/* table address */,
2556 0/* table size */, 0/* page size */);
	if (ret)
		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
	else
		pnv_pci_ioda2_tce_invalidate_pe(pe);

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);

	return ret;
}
#endif
2568 #ifdef CONFIG_IOMMU_API
2569 static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
		__u64 window_size, __u32 levels)
{
2572 unsigned long bytes = 0;
2573 const unsigned window_shift = ilog2(window_size);
2574 unsigned entries_shift = window_shift - page_shift;
2575 unsigned table_shift = entries_shift + 3;
2576 unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
2577 unsigned long direct_table_size;
	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
			!is_power_of_2(window_size))
		return 0;
2583 /* Calculate a direct table size from window_size and levels */
2584 entries_shift = (entries_shift + levels - 1) / levels;
2585 table_shift = entries_shift + 3;
2586 table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
2587 direct_table_size = 1UL << table_shift;
2589 for ( ; levels; --levels) {
2590 bytes += _ALIGN_UP(tce_table_size, direct_table_size);
2592 tce_table_size /= direct_table_size;
2593 tce_table_size <<= 3;
		tce_table_size = max_t(unsigned long,
				tce_table_size, direct_table_size);
	}

	return bytes + bytes; /* one for HW table, one for userspace copy */
}
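/*
 * Worked example (illustrative): a 2GB window with 4K IOMMU pages and
 * levels = 1 gives entries_shift = 31 - 12 = 19 and table_shift = 19 + 3
 * = 22, i.e. a single 4MB table; the function returns 8MB to account for
 * both the hardware table and its userspace copy.
 */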
2601 static long pnv_pci_ioda2_create_table_userspace(
2602 struct iommu_table_group *table_group,
2603 int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	return pnv_pci_ioda2_create_table(table_group,
			num, page_shift, window_size, levels, true, ptbl);
}
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);
	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
	struct iommu_table *tbl = pe->table_group.tables[0];

	pnv_pci_ioda2_set_bypass(pe, false);
	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (pe->pbus)
		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
	iommu_tce_table_put(tbl);
}
static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);

	pnv_pci_ioda2_setup_default_config(pe);
	if (pe->pbus)
		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
}
2634 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
2635 .get_table_size = pnv_pci_ioda2_get_table_size,
2636 .create_table = pnv_pci_ioda2_create_table_userspace,
2637 .set_window = pnv_pci_ioda2_set_window,
2638 .unset_window = pnv_pci_ioda2_unset_window,
2639 .take_ownership = pnv_ioda2_take_ownership,
	.release_ownership = pnv_ioda2_release_ownership,
};
static int gpe_table_group_to_npe_cb(struct device *dev, void *opaque)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe **ptmppe = opaque;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	struct pci_dn *pdn = pci_get_pdn(pdev);

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return 0;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	if (phb->type != PNV_PHB_NPU_NVLINK)
		return 0;

	*ptmppe = &phb->ioda.pe_array[pdn->pe_number];

	return 1;
}
/*
 * This returns PE of associated NPU.
 * This assumes that NPU is in the same IOMMU group with GPU and there
 * are no other PEs.
 */
static struct pnv_ioda_pe *gpe_table_group_to_npe(
		struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = NULL;
	int ret = iommu_group_for_each_dev(table_group->group, &npe,
			gpe_table_group_to_npe_cb);

	BUG_ON(!ret || !npe);

	return npe;
}
static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group);
	int num2 = (num == 0) ? 1 : 0;
	long ret = pnv_pci_ioda2_set_window(table_group, num, tbl);

	if (ret)
		return ret;

	if (table_group->tables[num2])
		pnv_npu_unset_window(npe, num2);

	ret = pnv_npu_set_window(npe, num, tbl);
	if (ret) {
		pnv_pci_ioda2_unset_window(table_group, num);
		if (table_group->tables[num2])
			pnv_npu_set_window(npe, num2,
					table_group->tables[num2]);
	}

	return ret;
}
static long pnv_pci_ioda2_npu_unset_window(
		struct iommu_table_group *table_group,
		int num)
{
	struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group);
	int num2 = (num == 0) ? 1 : 0;
	long ret = pnv_pci_ioda2_unset_window(table_group, num);

	if (ret)
		return ret;

	if (!npe->table_group.tables[num])
		return 0;

	ret = pnv_npu_unset_window(npe, num);
	if (ret)
		return ret;

	if (table_group->tables[num2])
		ret = pnv_npu_set_window(npe, num2, table_group->tables[num2]);

	return ret;
}
static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group)
{
	/*
	 * Detach NPU first as pnv_ioda2_take_ownership() will destroy
	 * the iommu_table if 32bit DMA is enabled.
	 */
	pnv_npu_take_ownership(gpe_table_group_to_npe(table_group));
	pnv_ioda2_take_ownership(table_group);
}
2739 static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
2740 .get_table_size = pnv_pci_ioda2_get_table_size,
2741 .create_table = pnv_pci_ioda2_create_table_userspace,
2742 .set_window = pnv_pci_ioda2_npu_set_window,
2743 .unset_window = pnv_pci_ioda2_npu_unset_window,
2744 .take_ownership = pnv_ioda2_npu_take_ownership,
	.release_ownership = pnv_ioda2_release_ownership,
};
static void pnv_pci_ioda_setup_iommu_api(void)
{
2750 struct pci_controller *hose, *tmp;
2751 struct pnv_phb *phb;
2752 struct pnv_ioda_pe *pe, *gpe;
	/*
	 * Now we have all PHBs discovered, time to add NPU devices to
	 * the corresponding IOMMU groups.
	 */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->type != PNV_PHB_NPU_NVLINK)
			continue;

		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			gpe = pnv_pci_npu_setup_iommu(pe);
			if (gpe)
				gpe->table_group.ops = &pnv_pci_ioda2_npu_ops;
		}
	}
}
#else /* !CONFIG_IOMMU_API */
static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif
static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	unsigned long mask = 0;
	int i, rc, count;
	u32 val;

	count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
	if (count <= 0) {
		mask = SZ_4K | SZ_64K;
		/* Add 16M for POWER8 by default */
		if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				!cpu_has_feature(CPU_FTR_ARCH_300))
			mask |= SZ_16M | SZ_256M;
		return mask;
	}

	for (i = 0; i < count; i++) {
		rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
						i, &val);
		if (rc == 0)
			mask |= 1ULL << val;
	}

	return mask;
}
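/*
 * Worked example (illustrative): a device tree exposing
 * ibm,supported-tce-sizes = <12 16> (page shifts, not bytes) yields
 * mask = (1UL << 12) | (1UL << 16) = SZ_4K | SZ_64K.
 */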
2803 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	int64_t rc;

	if (!pnv_pci_ioda_pe_dma_weight(pe))
		return;
2811 /* TVE #1 is selected by PCI address bit 59 */
2812 pe->tce_bypass_base = 1ull << 59;
	iommu_register_group(&pe->table_group, phb->hose->global_number,
			pe->pe_number);
2817 /* The PE will reserve all possible 32-bits space */
2818 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
2819 phb->ioda.m32_pci_base);
2821 /* Setup linux iommu table */
2822 pe->table_group.tce32_start = 0;
2823 pe->table_group.tce32_size = phb->ioda.m32_pci_base;
2824 pe->table_group.max_dynamic_windows_supported =
2825 IOMMU_TABLE_GROUP_MAX_TABLES;
2826 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
2827 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
2828 #ifdef CONFIG_IOMMU_API
	pe->table_group.ops = &pnv_pci_ioda2_ops;
#endif

	rc = pnv_pci_ioda2_setup_default_config(pe);
	if (rc)
		return;

	if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
}
2840 #ifdef CONFIG_PCI_MSI
int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
{
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);

	return opal_pci_msi_eoi(phb->opal_id, hw_irq);
}

static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	int64_t rc;
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);

	rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}
void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
	struct irq_data *idata;
	struct irq_chip *ichip;

	/* The MSI EOI OPAL call is only needed on PHB3 */
	if (phb->model != PNV_PHB_MODEL_PHB3)
		return;

	if (!phb->ioda.irq_chip_init) {
		/*
		 * First time we setup an MSI IRQ, we need to setup the
		 * corresponding IRQ chip to route correctly.
		 */
		idata = irq_get_irq_data(virq);
		ichip = irq_data_get_irq_chip(idata);
		phb->ioda.irq_chip_init = 1;
		phb->ioda.irq_chip = *ichip;
		phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
	}
	irq_set_chip(virq, &phb->ioda.irq_chip);
}
/*
 * Returns true iff chip is something that we could call
 * pnv_opal_pci_msi_eoi for.
 */
bool is_pnv_opal_msi(struct irq_chip *chip)
{
	return chip->irq_eoi == pnv_ioda2_msi_eoi;
}
EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
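/*
 * Caller sketch (illustrative, not part of this file): an interrupt
 * consumer can test the chip before short-cutting EOIs through OPAL:
 *
 *	struct irq_chip *chip = irq_data_get_irq_chip(irq_get_irq_data(virq));
 *
 *	if (chip && is_pnv_opal_msi(chip))
 *		... this hwirq may be EOIed via pnv_opal_pci_msi_eoi() ...
 */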
2895 static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
2896 unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	unsigned int xive_num = hwirq - phb->msi_base;
	__be32 data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (dev->no_64bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
2949 msg->data = be32_to_cpu(data);
2951 pnv_set_msi_irq_chip(phb, virq);
2953 pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
2954 " address=%x_%08x data=%x PE# %x\n",
2955 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
2988 #endif /* CONFIG_PCI_MSI */
2990 #ifdef CONFIG_PCI_IOV
static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	const resource_size_t gate = phb->ioda.m64_segsize >> 2;
	struct resource *res;
	int i;
	resource_size_t size, total_vf_bar_sz;
	struct pci_dn *pdn;
	int mul, total_vfs;

	if (!pdev->is_physfn || pci_dev_is_added(pdev))
		return;
3005 pdn = pci_get_pdn(pdev);
3006 pdn->vfs_expanded = 0;
3007 pdn->m64_single_mode = false;
3009 total_vfs = pci_sriov_get_totalvfs(pdev);
3010 mul = phb->ioda.total_pe_num;
3011 total_vf_bar_sz = 0;
3013 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
3014 res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
		if (!pnv_pci_is_m64_flags(res->flags)) {
			dev_warn(&pdev->dev, "Don't support SR-IOV with"
					" non M64 VF BAR%d: %pR\n",
				 i, res);
			goto truncate_iov;
		}

		total_vf_bar_sz += pci_iov_resource_size(pdev,
				i + PCI_IOV_RESOURCES);
		/*
		 * If bigger than quarter of M64 segment size, just round up
		 * power of two.
		 *
		 * Generally, one M64 BAR maps one IOV BAR. To avoid conflict
		 * with other devices, IOV BAR size is expanded to be
		 * (total_pe * VF_BAR_size).  When VF_BAR_size is half of M64
		 * segment size, the expanded size would equal half of the
		 * whole M64 space size, which will exhaust the M64 space and
		 * limit the system flexibility.  This is a design decision to
		 * set the boundary to quarter of the M64 segment size.
		 */
		if (total_vf_bar_sz > gate) {
			mul = roundup_pow_of_two(total_vfs);
			dev_info(&pdev->dev,
				"VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
				total_vf_bar_sz, gate, mul);
			pdn->m64_single_mode = true;
			break;
		}
	}
3049 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
3050 res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;

		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
		/*
		 * On PHB3, the minimum size alignment of M64 BAR in single
		 * mode is 32MB.
		 */
		if (pdn->m64_single_mode && (size < SZ_32M))
			goto truncate_iov;
		dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
		res->end = res->start + size * mul - 1;
		dev_dbg(&pdev->dev, "                       %pR\n", res);
		dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
			 i, res, mul);
	}
	pdn->vfs_expanded = mul;

	return;

truncate_iov:
3072 /* To save MMIO space, IOV BAR is truncated. */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
		res->end = res->start - 1;
	}
}
#endif /* CONFIG_PCI_IOV */
3081 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
				  struct resource *res)
{
	struct pnv_phb *phb = pe->phb;
	struct pci_bus_region region;
	unsigned int index;
	int64_t rc;

	if (!res || !res->flags || res->start > res->end)
		return;
3092 if (res->flags & IORESOURCE_IO) {
3093 region.start = res->start - phb->ioda.io_pci_base;
3094 region.end = res->end - phb->ioda.io_pci_base;
3095 index = region.start / phb->ioda.io_segsize;
3097 while (index < phb->ioda.total_pe_num &&
3098 region.start <= region.end) {
3099 phb->ioda.io_segmap[index] = pe->pe_number;
3100 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3101 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
3102 if (rc != OPAL_SUCCESS) {
3103 pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.io_segsize;
			index++;
		}
3111 } else if ((res->flags & IORESOURCE_MEM) &&
3112 !pnv_pci_is_m64(phb, res)) {
3113 region.start = res->start -
3114 phb->hose->mem_offset[0] -
3115 phb->ioda.m32_pci_base;
3116 region.end = res->end -
3117 phb->hose->mem_offset[0] -
3118 phb->ioda.m32_pci_base;
3119 index = region.start / phb->ioda.m32_segsize;
3121 while (index < phb->ioda.total_pe_num &&
3122 region.start <= region.end) {
3123 phb->ioda.m32_segmap[index] = pe->pe_number;
3124 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3125 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
3126 if (rc != OPAL_SUCCESS) {
3127 pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.m32_segsize;
			index++;
		}
	}
}
/*
 * This function is supposed to be called on basis of PE from top
 * to bottom style. So the I/O or MMIO segment assigned to
 * parent PE could be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pci_dev *pdev;
	int i;

	/*
	 * NOTE: We only care PCI bus based PE for now. For PCI
	 * device based PE, for example SRIOV sensitive VF should
	 * be figured out later.
	 */
3153 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
3155 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
3156 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
3157 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);
		/*
		 * If the PE contains all subordinate PCI buses, the
		 * windows of the child bridges should be mapped to
		 * the PE as well.
		 */
		if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
			continue;
3166 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
3167 pnv_ioda_setup_pe_res(pe,
				&pdev->resource[PCI_BRIDGE_RESOURCES + i]);
	}
}
3172 #ifdef CONFIG_DEBUG_FS
static int pnv_pci_diag_data_set(void *data, u64 val)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	s64 ret;

	if (val != 1ULL)
		return -EINVAL;

	hose = (struct pci_controller *)data;
	if (!hose || !hose->private_data)
		return -ENODEV;
3186 phb = hose->private_data;
3188 /* Retrieve the diag data from firmware */
3189 ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
3190 phb->diag_data_size);
	if (ret != OPAL_SUCCESS)
		return -EIO;

	/* Print the diag data to the kernel log */
	pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
	return 0;
}
3199 DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
3200 pnv_pci_diag_data_set, "%llu\n");
3202 #endif /* CONFIG_DEBUG_FS */
static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];
3211 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3212 phb = hose->private_data;
3214 /* Notify initialization of PHB done */
3215 phb->initialized = 1;
3217 sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs) {
			pr_warn("%s: Error on creating debugfs on PHB#%x\n",
				__func__, hose->global_number);
			continue;
		}
3225 debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
3226 &pnv_pci_diag_data_fops);
	}
#endif /* CONFIG_DEBUG_FS */
}
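/*
 * Usage sketch (illustrative): with debugfs mounted, writing 1 to
 * /sys/kernel/debug/powerpc/PCI<NNNN>/dump_diag_regs invokes
 * pnv_pci_diag_data_set(), which fetches the PHB diag data from OPAL and
 * dumps it to the kernel log.
 */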
static void pnv_pci_enable_bridge(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct pci_bus *child;

	/* Empty bus ? bail */
	if (list_empty(&bus->devices))
		return;

	/*
	 * If there's a bridge associated with that bus enable it. This works
	 * around races in the generic code if the enabling is done during
	 * parallel probing. This can be removed once those races have been
	 * fixed.
	 */
	if (dev) {
		int rc = pci_enable_device(dev);
		if (rc)
			pci_err(dev, "Error enabling bridge (%d)\n", rc);
		pci_set_master(dev);
	}

	/* Perform the same to child busses */
	list_for_each_entry(child, &bus->children, node)
		pnv_pci_enable_bridge(child);
}
static void pnv_pci_enable_bridges(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		pnv_pci_enable_bridge(hose->bus);
}
static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_iommu_api();
	pnv_pci_ioda_create_dbgfs();

	pnv_pci_enable_bridges();

#ifdef CONFIG_EEH
	pnv_eeh_post_init();
#endif
}
/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. For that case, we
 * needn't enlarge the alignment so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
3294 struct pci_dev *bridge;
3295 struct pci_controller *hose = pci_bus_to_host(bus);
3296 struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/*
	 * We fall back to M32 if M64 isn't supported. We enforce the M64
	 * alignment for any 64-bit resource, PCIe doesn't care and
	 * bridges only do 64-bit prefetchable anyway.
	 */
3315 if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
3316 return phb->ioda.m64_segsize;
3317 if (type & IORESOURCE_MEM)
3318 return phb->ioda.m32_segsize;
	return phb->ioda.io_segsize;
}
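/*
 * Worked example (illustrative): on a PHB with m64_segsize = 256MB and
 * m32_segsize = 8MB, a bridge window holding 64-bit prefetchable BARs is
 * aligned to 256MB so it covers whole M64 segments, while a 32-bit
 * memory window only needs 8MB (one M32 segment) of alignment.
 */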
/*
 * We are updating root port or the upstream port of the
 * bridge behind the root port with PHB's windows in order
 * to accommodate the changes on required resources during
 * PCI (slot) hotplug, which is connected to either root
 * port or the downstream ports of PCIe switch behind the
 * root port.
 */
static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
					   unsigned long type)
{
3334 struct pci_controller *hose = pci_bus_to_host(bus);
3335 struct pnv_phb *phb = hose->private_data;
3336 struct pci_dev *bridge = bus->self;
3337 struct resource *r, *w;
	bool msi_region = false;
	int i;

	/* Check if we need to apply fixup to the bridge's windows */
	if (!pci_is_root_bus(bridge->bus) &&
	    !pci_is_root_bus(bridge->bus->self->bus))
		return;
3346 /* Fixup the resources */
3347 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
3348 r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
		if (!r->flags || !r->parent)
			continue;

		w = NULL;
		if (r->flags & type & IORESOURCE_IO)
3354 w = &hose->io_resource;
3355 else if (pnv_pci_is_m64(phb, r) &&
3356 (type & IORESOURCE_PREFETCH) &&
3357 phb->ioda.m64_segsize)
3358 w = &hose->mem_resources[1];
		else if (r->flags & type & IORESOURCE_MEM) {
			w = &hose->mem_resources[0];
			msi_region = true;
		}

		r->start = w->start;
		r->end = w->end;

		/* The 64KB 32-bits MSI region shouldn't be included in
		 * the 32-bits bridge window. Otherwise, we can see strange
		 * issues. One of them is EEH error observed on Garrison.
		 *
		 * Exclude top 1MB region which is the minimal alignment of
		 * 32-bits bridge window.
		 */
		if (msi_region) {
			r->end += 0x10000;
			r->end -= 0x100000;
		}
	}
}
static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
3384 struct pnv_phb *phb = hose->private_data;
3385 struct pci_dev *bridge = bus->self;
3386 struct pnv_ioda_pe *pe;
3387 bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
3389 /* Extend bridge's windows if necessary */
3390 pnv_pci_fixup_bridge_resources(bus, type);
3392 /* The PE for root bus should be realized before any one else */
3393 if (!phb->ioda.root_pe_populated) {
		pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
		if (pe) {
			phb->ioda.root_pe_idx = pe->pe_number;
			phb->ioda.root_pe_populated = true;
		}
	}

	/* Don't assign PE to PCI bus, which doesn't have subordinate devices */
	if (list_empty(&bus->devices))
		return;
3405 /* Reserve PEs according to used M64 resources */
3406 if (phb->reserve_m64_pe)
3407 phb->reserve_m64_pe(bus, NULL, all);
	/*
	 * Assign PE. We might run here because of partial hotplug.
	 * For the case, we just pick up the existing PE and should
	 * not allocate resources again.
	 */
	pe = pnv_ioda_setup_bus_PE(bus, all);
	if (!pe)
		return;

	pnv_ioda_setup_pe_seg(pe);
	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_setup_dma_pe(phb, pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_setup_dma_pe(phb, pe);
		break;
	default:
		pr_warn("%s: No DMA for PHB#%x (type %d)\n",
			__func__, phb->hose->global_number, phb->type);
	}
}
static resource_size_t pnv_pci_default_alignment(void)
{
	return PAGE_SIZE;
}
3437 #ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
						      int resno)
{
3441 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3442 struct pnv_phb *phb = hose->private_data;
3443 struct pci_dn *pdn = pci_get_pdn(pdev);
3444 resource_size_t align;
	/*
	 * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the
	 * SR-IOV. While from hardware perspective, the range mapped by M64
	 * BAR should be size aligned.
	 *
	 * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra
	 * powernv-specific hardware restriction is gone. But if just use the
	 * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with
	 * in one segment of M64 #15, which introduces the PE conflict between
	 * PF and VF. Based on this, the minimum alignment of an IOV BAR is
	 * M64 segment size.
	 *
	 * This function returns the total IOV BAR size if M64 BAR is in
	 * Shared PE mode or just VF BAR size if not.
	 * If the M64 BAR is in Single PE mode, return the VF BAR size or
	 * M64 segment size if IOV BAR size is less.
	 */
3463 align = pci_iov_resource_size(pdev, resno);
	if (!pdn->vfs_expanded)
		return align;
	if (pdn->m64_single_mode)
		return max(align, (resource_size_t)phb->ioda.m64_segsize);

	return pdn->vfs_expanded * align;
}
#endif /* CONFIG_PCI_IOV */
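/*
 * Worked example (illustrative): for an 8MB VF BAR on a PHB with 256 PEs,
 * shared PE mode returns 256 * 8MB = 2GB (the expanded IOV BAR size),
 * whereas single PE mode returns max(8MB, m64_segsize).
 */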
/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet. For example, resource reassignment
	 * during PCI probe period. We just skip the check if
	 * PEs aren't ready.
	 */
	if (!phb->initialized)
		return true;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return false;

	return true;
}
static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
		int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group,
					      struct pnv_ioda_pe, table_group);
	struct pnv_phb *phb = pe->phb;
	unsigned int idx;
	long rc;

	pe_info(pe, "Removing DMA window #%d\n", num);
	for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
		if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
			continue;

		rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
						idx, 0, 0ul, 0ul, 0ul);
		if (rc != OPAL_SUCCESS) {
			pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
				rc, idx);
			return rc;
		}

		phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
	}

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
	return OPAL_SUCCESS;
}
static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
{
	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
	struct iommu_table *tbl = pe->table_group.tables[0];
	int64_t rc;

	if (!weight)
		return;

	rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
	if (rc != OPAL_SUCCESS)
		return;

	pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	free_pages(tbl->it_base, get_order(tbl->it_size << 3));
	iommu_tce_table_put(tbl);
}
static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = pe->table_group.tables[0];
	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
#ifdef CONFIG_IOMMU_API
	int64_t rc;
#endif

	if (!weight)
		return;

#ifdef CONFIG_IOMMU_API
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
#endif

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	iommu_tce_table_put(tbl);
}
static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
				 unsigned short win,
				 unsigned int *map)
{
	struct pnv_phb *phb = pe->phb;
	int idx;
	int64_t rc;

	for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
		if (map[idx] != pe->pe_number)
			continue;

		if (win == OPAL_M64_WINDOW_TYPE)
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					phb->ioda.reserved_pe_idx, win,
					idx / PNV_IODA1_M64_SEGS,
					idx % PNV_IODA1_M64_SEGS);
		else
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					phb->ioda.reserved_pe_idx, win, 0, idx);

		if (rc != OPAL_SUCCESS)
			pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
				rc, win, idx);

		map[idx] = IODA_INVALID_PE;
	}
}
static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1) {
		pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
				     phb->ioda.io_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
				     phb->ioda.m64_segmap);
	} else if (phb->type == PNV_PHB_IODA2) {
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
	}
}
static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
{
3623 struct pnv_phb *phb = pe->phb;
3624 struct pnv_ioda_pe *slave, *tmp;
3626 list_del(&pe->list);
	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_release_pe_dma(pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_release_pe_dma(pe);
		break;
	default:
		break;
	}
3638 pnv_ioda_release_pe_seg(pe);
3639 pnv_ioda_deconfigure_pe(pe->phb, pe);
3641 /* Release slave PEs in the compound PE */
3642 if (pe->flags & PNV_IODA_PE_MASTER) {
3643 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
3644 list_del(&slave->list);
			pnv_ioda_free_pe(slave);
		}
	}

	/*
	 * The PE for root bus can be removed because of hotplug in EEH
	 * recovery for fenced PHB error. We need to mark the PE dead so
	 * that it can be populated again in PCI hot add path. The PE
	 * shouldn't be destroyed as it's the global reserved resource.
	 */
	if (phb->ioda.root_pe_populated &&
	    phb->ioda.root_pe_idx == pe->pe_number)
		phb->ioda.root_pe_populated = false;
	else
		pnv_ioda_free_pe(pe);
}
static void pnv_pci_release_device(struct pci_dev *pdev)
{
3664 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3665 struct pnv_phb *phb = hose->private_data;
3666 struct pci_dn *pdn = pci_get_pdn(pdev);
3667 struct pnv_ioda_pe *pe;
	if (pdev->is_virtfn)
		return;

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	/*
	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
	 * isn't removed and added afterwards in this scenario. We should
	 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
	 * device count is decreased on removing devices while failing to
	 * be increased on adding devices. It leads to unbalanced PE's device
	 * count and eventually makes the normal PCI hotplug path broken.
	 */
3683 pe = &phb->ioda.pe_array[pdn->pe_number];
3684 pdn->pe_number = IODA_INVALID_PE;
3686 WARN_ON(--pe->device_count < 0);
3687 if (pe->device_count == 0)
		pnv_ioda_release_pe(pe);
}
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
		       OPAL_ASSERT_RESET);
}
3699 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
3700 .dma_dev_setup = pnv_pci_dma_dev_setup,
3701 .dma_bus_setup = pnv_pci_dma_bus_setup,
3702 #ifdef CONFIG_PCI_MSI
3703 .setup_msi_irqs = pnv_setup_msi_irqs,
	.teardown_msi_irqs = pnv_teardown_msi_irqs,
#endif
3706 .enable_device_hook = pnv_pci_enable_device_hook,
3707 .release_device = pnv_pci_release_device,
3708 .window_alignment = pnv_pci_window_alignment,
3709 .setup_bridge = pnv_pci_setup_bridge,
3710 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
3711 .dma_set_mask = pnv_pci_ioda_dma_set_mask,
3712 .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
	.shutdown = pnv_pci_ioda_shutdown,
};
static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
{
	dev_err_once(&npdev->dev,
			"%s operation unsupported for NVLink devices\n",
			__func__);
	return -EPERM;
}
3724 static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
3725 .dma_dev_setup = pnv_pci_dma_dev_setup,
3726 #ifdef CONFIG_PCI_MSI
3727 .setup_msi_irqs = pnv_setup_msi_irqs,
	.teardown_msi_irqs = pnv_teardown_msi_irqs,
#endif
3730 .enable_device_hook = pnv_pci_enable_device_hook,
3731 .window_alignment = pnv_pci_window_alignment,
3732 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
3733 .dma_set_mask = pnv_npu_dma_set_mask,
	.shutdown = pnv_pci_ioda_shutdown,
};
3737 static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
3738 .enable_device_hook = pnv_pci_enable_device_hook,
3739 .window_alignment = pnv_pci_window_alignment,
3740 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
	.shutdown = pnv_pci_ioda_shutdown,
};
3744 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
3748 struct pnv_phb *phb;
3749 unsigned long size, m64map_off, m32map_off, pemap_off;
	unsigned long iomap_off = 0, dma32map_off = 0;
	struct resource r;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	unsigned int segno;
	u64 phb_id;
	void *aux;
	long rc;

	if (!of_device_is_available(np))
		return;
3763 pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np);
3765 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
3770 phb_id = be64_to_cpup(prop64);
3771 pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
3773 phb = memblock_alloc(sizeof(*phb), 0);
3775 /* Allocate PCI controller */
3776 phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %pOF\n",
		       np);
		memblock_free(__pa(phb), sizeof(struct pnv_phb));
		return;
	}
3784 spin_lock_init(&phb->lock);
3785 prop32 = of_get_property(np, "bus-range", &len);
3786 if (prop32 && len == 8) {
3787 hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %pOF\n", np);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
3794 hose->private_data = phb;
3795 phb->hub_id = hub_id;
3796 phb->opal_id = phb_id;
3797 phb->type = ioda_type;
3798 mutex_init(&phb->ioda.pe_alloc_mutex);
3800 /* Detect specific models for error handling */
3801 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
3802 phb->model = PNV_PHB_MODEL_P7IOC;
3803 else if (of_device_is_compatible(np, "ibm,power8-pciex"))
3804 phb->model = PNV_PHB_MODEL_PHB3;
3805 else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
3806 phb->model = PNV_PHB_MODEL_NPU;
3807 else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
3808 phb->model = PNV_PHB_MODEL_NPU2;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;
3812 /* Initialize diagnostic data buffer */
3813 prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL);
	if (prop32)
		phb->diag_data_size = be32_to_cpup(prop32);
	else
		phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
3819 phb->diag_data = memblock_alloc(phb->diag_data_size, 0);
3821 /* Parse 32-bit and IO ranges (if any) */
3822 pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
3825 if (!of_address_to_resource(np, 0, &r)) {
3826 phb->regs_phys = r.start;
3827 phb->regs = ioremap(r.start, resource_size(&r));
3828 if (phb->regs == NULL)
			pr_err("  Failed to map registers !\n");
	}
3832 /* Initialize more IODA stuff */
3833 phb->ioda.total_pe_num = 1;
3834 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe_num = be32_to_cpup(prop32);

	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
3841 /* Invalidate RID to PE# mapping */
3842 for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
3843 phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;
3845 /* Parse 64-bit MMIO range */
3846 pnv_ioda_parse_m64_window(phb);
3848 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already carved the top 64k of M32 space off (MSI space) */
3850 phb->ioda.m32_size += 0x10000;
3852 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
3853 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
3854 phb->ioda.io_size = hose->pci_io_size;
3855 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
3856 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
3858 /* Calculate how many 32-bit TCE segments we have */
3859 phb->ioda.dma32_count = phb->ioda.m32_pci_base /
3860 PNV_IODA1_DMA32_SEGSIZE;
3862 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
3863 size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
			sizeof(unsigned long));
	m64map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
	m32map_off = size;
3868 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
3872 dma32map_off = size;
3873 size += phb->ioda.dma32_count *
			sizeof(phb->ioda.dma32_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
3878 aux = memblock_alloc(size, 0);
3879 phb->ioda.pe_alloc = aux;
3880 phb->ioda.m64_segmap = aux + m64map_off;
3881 phb->ioda.m32_segmap = aux + m32map_off;
3882 for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
3883 phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
	}
3886 if (phb->type == PNV_PHB_IODA1) {
3887 phb->ioda.io_segmap = aux + iomap_off;
3888 for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
3889 phb->ioda.io_segmap[segno] = IODA_INVALID_PE;
3891 phb->ioda.dma32_segmap = aux + dma32map_off;
3892 for (segno = 0; segno < phb->ioda.dma32_count; segno++)
			phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
	}
	phb->ioda.pe_array = aux + pemap_off;

	/*
	 * Choose PE number for root bus, which shouldn't have
	 * M64 resources consumed by its child devices. To pick
	 * the PE number adjacent to the reserved one if possible.
	 */
3902 pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
3903 if (phb->ioda.reserved_pe_idx == 0) {
3904 phb->ioda.root_pe_idx = 1;
3905 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3906 } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
3907 phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
3908 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else {
		phb->ioda.root_pe_idx = IODA_INVALID_PE;
	}
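	/*
	 * Example outcomes (illustrative): reserved_pe_idx == 0 picks PE#1
	 * as the root PE; with 256 PEs and reserved_pe_idx == 255, PE#254
	 * is picked; any other reserved index leaves the choice until the
	 * root bus PE is actually created.
	 */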
3913 INIT_LIST_HEAD(&phb->ioda.pe_list);
3914 mutex_init(&phb->ioda.pe_list_mutex);
3920 #if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
3930 phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
3931 phb->ioda.m32_size, phb->ioda.m32_segsize);
3932 if (phb->ioda.m64_size)
3933 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
3934 phb->ioda.m64_size, phb->ioda.m64_segsize);
3935 if (phb->ioda.io_size)
3936 pr_info(" IO: 0x%x [segment=0x%x]\n",
3937 phb->ioda.io_size, phb->ioda.io_segsize);
3940 phb->hose->ops = &pnv_pci_ops;
3941 phb->get_pe_state = pnv_ioda_get_pe_state;
3942 phb->freeze_pe = pnv_ioda_freeze_pe;
3943 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
3945 /* Setup MSI support */
3946 pnv_pci_init_ioda_msis(phb);
3949 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
3950 * to let the PCI core do resource assignment. It's supposed
3951 * that the PCI core will do correct I/O and MMIO alignment
3952 * for the P2P bridge bars so that each PCI bus (excluding
3953 * the child P2P bridges) can form individual PE.
3955 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
3957 switch (phb->type) {
	case PNV_PHB_NPU_NVLINK:
		hose->controller_ops = pnv_npu_ioda_controller_ops;
		break;
	case PNV_PHB_NPU_OCAPI:
		hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
		break;
	default:
		phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
		hose->controller_ops = pnv_pci_ioda_controller_ops;
	}
3969 ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
3971 #ifdef CONFIG_PCI_IOV
3972 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
3973 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
3974 ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
	ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
#endif
3978 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
3980 /* Reset IODA tables to a clean state */
3981 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
	if (rc)
		pr_warn("  OPAL Error %ld performing IODA table reset !\n", rc);
	/*
	 * If we're running in kdump kernel, the previous kernel never
	 * shutdown PCI devices correctly. We already got IODA table
	 * cleaned out. So we have to issue PHB reset to stop all PCI
	 * transactions from previous kernel. The ppc_pci_reset_phbs
	 * kernel parameter will force this reset too.
	 */
3992 if (is_kdump_kernel() || pci_reset_phbs) {
3993 pr_info(" Issue PHB reset ...\n");
3994 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
3995 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
3998 /* Remove M64 resource if we can't configure it successfully */
3999 if (!phb->init_m64 || phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;
}
void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}
void __init pnv_pci_init_npu_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_NVLINK);
}
void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
}
static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev)
{
4020 struct pci_controller *hose = pci_bus_to_host(dev->bus);
4021 struct pnv_phb *phb = hose->private_data;
	if (!machine_is(powernv))
		return;
4026 if (phb->type == PNV_PHB_NPU_OCAPI)
		dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
}
4029 DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup);
void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;
4037 pr_info("Probing IODA IO-Hub %pOF\n", np);
4039 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
4044 hub_id = be64_to_cpup(prop64);
4045 pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
4047 /* Count child PHBs */
4048 for_each_child_of_node(np, phbn) {
4049 /* Look for IODA1 PHBs */
4050 if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}