// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 */

#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/debugfs.h>
#include <asm/powernv.h>
#include <asm/opal.h>

#include "pci.h"

static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
					   pdn->busno, pdn->devfn);

	/*
	 * pci_get_domain_bus_and_slot() increased the reference count of
	 * the PCI device, but callers don't actually need that extra
	 * reference as the PE already holds one to the device. Since
	 * callers aren't aware of the reference count change, call
	 * pci_dev_put() now to avoid leaks.
	 */
	if (pdev)
		pci_dev_put(pdev);

	return pdev;
}

/* Given an NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get the associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get the associated NPU device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);

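/*
 * The two lookups above rely on cross phandles in the device tree: each
 * NPU "emulated PCI" device carries an "ibm,gpu" phandle pointing at its
 * GPU, and the GPU carries one "ibm,npu" phandle per attached link. A
 * hypothetical fragment, for illustration only (node and label names
 * invented, not taken from real firmware output):
 *
 *	gpu0: gpu@0 {
 *		ibm,npu = <&npu_link0 &npu_link1>;
 *	};
 *	npu_link0: link@0 {
 *		ibm,gpu = <&gpu0>;
 *	};
 */
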
/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked PCI device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
		struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

static long pnv_npu_unset_window(struct iommu_table_group *table_group,
		int num);

static long pnv_npu_set_window(struct iommu_table_group *table_group, int num,
		struct iommu_table *tbl)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;
	int num2 = (num == 0) ? 1 : 0;

	/* NPU has just one TVE so if there is another table, remove it first */
	if (npe->table_group.tables[num2])
		pnv_npu_unset_window(&npe->table_group, num2);

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

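	/*
	 * The table size is handed to OPAL in bytes: each TCE entry is
	 * 8 bytes, hence the size << 3 below. The + 1 on the level count
	 * reflects that OPAL counts table levels from 1 while Linux
	 * counts indirect levels from 0.
	 */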
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	if (!npe->table_group.tables[num])
		return 0;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/*
 * Enables 32 bit DMA on NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(&npe->table_group, 0,
			gpe->table_group.tables[0]);

	/*
	 * NVLink devices use the same TCE table configuration as
	 * their parent device so drivers shouldn't be doing DMA
	 * operations directly on these devices.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_dummy_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(&npe->table_group, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

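	/*
	 * The bypass window has to cover all of system memory; rounding
	 * its size up to a power of two matches what the single hardware
	 * window (TVE) can express. Illustrative example only: with
	 * 200GB of RAM, top rounds up to 256GB.
	 */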
	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}
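
/*
 * Note: the loop above walks every NVLink attached to the GPU, so a
 * single call switches all of the GPU's links between bypass and
 * 32-bit windowed DMA together. It is expected to be called from the
 * PHB's DMA configuration path when the GPU driver sets its DMA mask
 * (an assumption about the caller, not something this file enforces).
 */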

#ifdef CONFIG_IOMMU_API
/* Switch ownership from platform code to external user (e.g. VFIO) */
static void pnv_npu_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	struct pci_dev *gpdev = NULL;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either a 32bit window or
	 * DMA bypass but never both. So we deconfigure the 32bit window
	 * only if it was enabled at the moment of the ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(&npe->table_group, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_unmap_lpar_dev(gpdev);
}

static void pnv_npu_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pci_dev *gpdev = NULL;

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
}

static struct iommu_table_group_ops pnv_pci_npu_ops = {
	.set_window = pnv_npu_set_window,
	.unset_window = pnv_npu_unset_window,
	.take_ownership = pnv_npu_take_ownership,
	.release_ownership = pnv_npu_release_ownership,
};
#endif /* CONFIG_IOMMU_API */

/*
 * NPU2 ATS
 */
/* Maximum possible number of ATSD MMIO registers per NPU */
#define NV_NMMU_ATSD_REGS 8
#define NV_NPU_MAX_PE_NUM	16

/*
 * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or
 * up to 3 x (GPU + 2xNPUs) (POWER9).
 */
struct npu_comp {
	struct iommu_table_group table_group;
	int pe_num;
	struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM];
};

/* An NPU descriptor, valid for POWER9 only */
struct npu {
	int index;
	struct npu_comp npucomp;
};

#ifdef CONFIG_IOMMU_API
static long pnv_npu_peers_create_table_userspace(
		struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	if (!npucomp->pe_num || !npucomp->pe[0] ||
			!npucomp->pe[0]->table_group.ops ||
			!npucomp->pe[0]->table_group.ops->create_table)
		return -EFAULT;

	return npucomp->pe[0]->table_group.ops->create_table(
			&npucomp->pe[0]->table_group, num, page_shift,
			window_size, levels, ptbl);
}

static long pnv_npu_peers_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->set_window)
			continue;

		ret = pe->table_group.ops->set_window(&pe->table_group,
				num, tbl);
		if (ret)
			break;
	}

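	/* On failure, roll back the windows already set on PEs 0..i-1 */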
	if (ret) {
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!pe->table_group.ops->unset_window)
				continue;

			ret = pe->table_group.ops->unset_window(
					&pe->table_group, num);
			if (ret)
				break;
		}
	} else {
		table_group->tables[num] = iommu_tce_table_get(tbl);
	}

	return ret;
}

static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group,
		int num)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		WARN_ON(npucomp->table_group.tables[num] !=
				table_group->tables[num]);
		if (!npucomp->table_group.tables[num])
			continue;

		if (!pe->table_group.ops->unset_window)
			continue;

		ret = pe->table_group.ops->unset_window(&pe->table_group, num);
		if (ret)
			break;
	}

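	/* On failure, restore the window on the PEs it was already removed from */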
	if (ret) {
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!npucomp->table_group.tables[num])
				continue;

			if (!pe->table_group.ops->set_window)
				continue;

			ret = pe->table_group.ops->set_window(&pe->table_group,
					num, table_group->tables[num]);
			if (ret)
				break;
		}
	} else if (table_group->tables[num]) {
		iommu_tce_table_put(table_group->tables[num]);
		table_group->tables[num] = NULL;
	}

	return ret;
}

static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->take_ownership)
			continue;
		pe->table_group.ops->take_ownership(&pe->table_group);
	}
}

static void pnv_npu_peers_release_ownership(
		struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->release_ownership)
			continue;
		pe->table_group.ops->release_ownership(&pe->table_group);
	}
}

static struct iommu_table_group_ops pnv_npu_peers_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_npu_peers_create_table_userspace,
	.set_window = pnv_npu_peers_set_window,
	.unset_window = pnv_npu_peers_unset_window,
	.take_ownership = pnv_npu_peers_take_ownership,
	.release_ownership = pnv_npu_peers_release_ownership,
};

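/*
 * Every callback above fans out to the corresponding callback of each
 * PE in the compound group, except create_table which is served by the
 * first attached PE alone (in practice the GPU PE) since all peers
 * share the same table.
 */
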
static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
		struct pnv_ioda_pe *pe)
{
	if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM))
		return;

	npucomp->pe[npucomp->pe_num] = pe;
	++npucomp->pe_num;
}

struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *table_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_controller *hose;
	struct pci_dev *npdev = NULL;

	list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) {
		npdev = pnv_pci_get_npu_dev(gpdev, 0);
		if (npdev)
			break;
	}

	if (!npdev)
		/* It is not an NPU attached device, skip */
		return NULL;

	hose = pci_bus_to_host(npdev->bus);

	if (hose->npu) {
		table_group = &hose->npu->npucomp.table_group;

		if (!table_group->group) {
			table_group->ops = &pnv_npu_peers_ops;
			iommu_register_group(table_group,
					hose->global_number,
					pe->pe_number);
		}
	} else {
		/* Create a group for 1 GPU and attached NPUs for POWER8 */
		pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
		if (!pe->npucomp)
			return NULL;

		table_group = &pe->npucomp->table_group;
		table_group->ops = &pnv_npu_peers_ops;
		iommu_register_group(table_group, hose->global_number,
				pe->pe_number);
	}

	/* Steal capabilities from a GPU PE */
	table_group->max_dynamic_windows_supported =
		pe->table_group.max_dynamic_windows_supported;
	table_group->tce32_start = pe->table_group.tce32_start;
	table_group->tce32_size = pe->table_group.tce32_size;
	table_group->max_levels = pe->table_group.max_levels;
	if (!table_group->pgsizes)
		table_group->pgsizes = pe->table_group.pgsizes;

	npucomp = container_of(table_group, struct npu_comp, table_group);
	pnv_comp_attach_table_group(npucomp, pe);

	return table_group;
}

struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *table_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_dev *npdev;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev);

	WARN_ON(!(pe->flags & PNV_IODA_PE_DEV));
	if (!gpe)
		return NULL;

	/*
	 * IODA2 bridges get this set up from pci_controller_ops::setup_bridge
	 * but NPU bridges do not have this hook defined so we do it here.
	 * We do not set up other table group parameters as they won't be used
	 * anyway - NVLink bridges are subordinate PEs.
	 */
	pe->table_group.ops = &pnv_pci_npu_ops;

	table_group = iommu_group_get_iommudata(
			iommu_group_get(&gpdev->dev));

	/*
	 * On P9 the NPU PHB and PCI PHB support different page sizes,
	 * so keep only the ones supported by both. We expect here that the
	 * NVLink bridge PE pgsizes are initialized by the caller.
	 */
	table_group->pgsizes &= pe->table_group.pgsizes;
	npucomp = container_of(table_group, struct npu_comp, table_group);
	pnv_comp_attach_table_group(npucomp, pe);

	list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) {
		struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev);

		if (gpdevtmp != gpdev)
			continue;

		iommu_add_device(table_group, &npdev->dev);
	}

	return table_group;
}
#endif /* CONFIG_IOMMU_API */

int pnv_npu2_init(struct pci_controller *hose)
{
	static int npu_index;
	struct npu *npu;
	int ret;

	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
	if (!npu)
		return -ENOMEM;

	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
		ret = -ENOSPC;
		goto fail_exit;
	}
	npu->index = npu_index;
	hose->npu = npu;

	return 0;

fail_exit:
	kfree(npu);
	return ret;
}

int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
		unsigned long msr)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
			nphb->opal_id, lparid);
	/*
	 * Currently we only support radix, and a non-zero LPCR only makes
	 * sense for hash tables, so skiboot expects the LPCR parameter to
	 * be zero.
	 */
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
			0 /* LPCR bits */);
	if (ret) {
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
		return ret;
	}

	dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
			nphb->opal_id, msr);
	ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
			pci_dev_id(gpdev));
	if (ret < 0)
		dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
	else
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);
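
/*
 * Illustrative pairing only (hypothetical caller): a host driver handing
 * the GPU to a guest would do something like
 *
 *	pnv_npu2_map_lpar_dev(gpdev, lparid, MSR_DR | MSR_PR | MSR_HV);
 *	...guest runs...
 *	pnv_npu2_unmap_lpar_dev(gpdev);
 *
 * mirroring what pnv_npu_release_ownership()/pnv_npu_take_ownership()
 * above do for the lparid == 0 (host) case.
 */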

void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
{
	struct pci_dev *gpdev;

	list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
		pnv_npu2_map_lpar_dev(gpdev, 0, msr);
}

int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
			nphb->opal_id);
	ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
			pci_dev_id(gpdev));
	if (ret < 0) {
		dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
		return ret;
	}

	/* Set LPID to 0 anyway, just to be safe */
	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
			0 /* LPCR bits */);
	if (ret)
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev);