/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#include "powernv.h"
#include "pci.h"

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * Other types of TCE cache invalidation are not functional in the
 * hardware.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	return PCI_DN(dn)->pcidev;
}

/* Given a NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated NPU device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);

#define NPU_DMA_OP_UNSUPPORTED()	\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nelems, enum dma_data_direction direction,
			  unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

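/*
 * NPU devices do not carry DMA traffic themselves; DMA is handled via the
 * iommu table of the parent GPU device. These ops therefore only warn so
 * that erroneous DMA API usage on an NPU device is caught.
 */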
static const struct dma_map_ops dma_npu_ops = {
	.map_page = dma_npu_map_page,
	.map_sg = dma_npu_map_sg,
	.alloc = dma_npu_alloc,
	.free = dma_npu_free,
	.dma_supported = dma_npu_dma_supported,
	.get_required_mask = dma_npu_get_required_mask,
};

/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked PCI device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

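/*
 * Programs the given TCE table into the NPU's DMA window via OPAL and
 * links the table to the PE's table group so that later TCE cache
 * invalidations reach the NPU.
 */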
long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/*
 * Enables 32-bit DMA on the NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops which are nops.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either 32bit window or
	 * DMA bypass but never both. So we deconfigure 32bit window only
	 * if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe || !gpdev)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}

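/*
 * The remainder of this file implements NPU2 support: shared address
 * translation between the CPU and GPU, built on ATSD (address translation
 * shootdown) registers and an mmu notifier.
 */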
/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

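/*
 * One npu_context is shared by all NPU links a given mm is active on. The
 * npdev array is indexed by NPU index and link index and is populated by
 * pnv_npu2_init_context().
 */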
struct npu_context {
	struct mm_struct *mm;
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;
	bool nmmu_flush;

	/* Callback to stop translation requests on a given GPU */
	struct npu_context *(*release_cb)(struct npu_context *, void *);

	/*
	 * Private pointer passed to the above callback for usage by
	 * device drivers.
	 */
	void *priv;
};

/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
		if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
			return i;
	}

	return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit(reg, &npu->mmio_atsd_usage);
}

/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA  1
#define XTS_ATSD_STAT 2

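/*
 * Launch an address translation shootdown: spin until an ATSD register is
 * free, write the target address, then write the launch word to kick off
 * the invalidation. The register is returned still busy; completion is
 * polled later via XTS_ATSD_STAT in mmio_invalidate_wait().
 */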
static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
			unsigned long va)
{
	int mmio_atsd_reg;

	do {
		mmio_atsd_reg = get_mmio_atsd_reg(npu);
		cpu_relax();
	} while (mmio_atsd_reg < 0);

	__raw_writeq(cpu_to_be64(va),
			npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
	eieio();
	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

	return mmio_atsd_reg;
}

static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set to invalidate matching PID */
	launch = PPC_BIT(12);

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	/* Invalidating the entire process doesn't use a va */
	return mmio_launch_invalidate(npu, launch, 0);
}

static int mmio_invalidate_va(struct npu *npu, unsigned long va,
			unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set to invalidate target VA */
	launch = 0;

	/* PRS set to process scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	return mmio_launch_invalidate(npu, launch, va);
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

struct mmio_atsd_reg {
	struct npu *npu;
	int reg;
};

static void mmio_invalidate_wait(
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
{
	struct npu *npu;
	int i, reg;

	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();

		put_mmio_atsd_reg(npu, reg);

		/*
		 * The GPU requires two flush ATSDs to ensure all entries have
		 * been flushed. We use PID 0 as it will never be used for a
		 * process on the GPU.
		 */
		if (flush)
			mmio_invalidate_pid(npu, 0, true);
	}
}

/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
			unsigned long address, bool flush)
{
	int i, j;
	struct npu *npu;
	struct pnv_phb *nphb;
	struct pci_dev *npdev;
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;

	if (npu_context->nmmu_flush)
		/*
		 * Unfortunately the nest mmu does not support flushing specific
		 * addresses so we have to flush the whole mm once before
		 * shooting down the GPU translation.
		 */
		flush_all_mm(npu_context->mm);

	/*
	 * Loop over all the NPUs this process is active on and launch
	 * an invalidate.
	 */
	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			npdev = npu_context->npdev[i][j];
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;

			if (va)
				mmio_atsd_reg[i].reg =
					mmio_invalidate_va(npu, address, pid,
							flush);
			else
				mmio_atsd_reg[i].reg =
					mmio_invalidate_pid(npu, pid, flush);

			/*
			 * The NPU hardware forwards the shootdown to all GPUs
			 * so we only have to launch one shootdown per NPU.
			 */
			break;
		}
	}

	mmio_invalidate_wait(mmio_atsd_reg, flush);
	if (flush)
		/* Wait for the flush to complete */
		mmio_invalidate_wait(mmio_atsd_reg, false);
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, 0, true);
}

static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address, true);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;

	for (address = start; address < end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address, false);

	/* Do the flush only on the final address == end */
	mmio_invalidate(npu_context, 1, address, true);
}

static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};

/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available or a
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			struct npu_context *(*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	if (!mm || mm->context.id == 0) {
		/*
		 * Kernel thread contexts are not supported and context id 0 is
		 * reserved on the GPU.
		 */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	npu_context = mm->context.npu_context;
	if (!npu_context) {
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (!npu_context)
			return ERR_PTR(-ENOMEM);

		mm->context.npu_context = npu_context;
		npu_context->mm = mm;
		npu_context->mn.ops = &nv_nmmu_notifier_ops;
		__mmu_notifier_register(&npu_context->mn, mm);
		kref_init(&npu_context->kref);
	} else {
		kref_get(&npu_context->kref);
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return ERR_PTR(-ENODEV);
	npu_context->npdev[npu->index][nvlink_index] = npdev;

	if (!nphb->npu.nmmu_flush) {
		/*
		 * If we're not explicitly flushing ourselves we need to mark
		 * the thread for global flushes
		 */
		npu_context->nmmu_flush = false;
		mm_context_add_copro(mm);
	} else {
		npu_context->nmmu_flush = true;
	}

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);

static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	if (!npu_context->nmmu_flush)
		mm_context_remove_copro(npu_context->mm);

	npu_context->mm->context.npu_context = NULL;
	mmu_notifier_unregister(&npu_context->mn,
				npu_context->mm);

	kfree(npu_context);
}

void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return;
	npu_context->npdev[npu->index][nvlink_index] = NULL;
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);

/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the mm_struct must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported
		 * in other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);

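/*
 * Illustrative sketch only (not part of this file's functionality): how a
 * hypothetical GPU driver might use the context API above. Every name here
 * (example_stop_cb, example_handle_gpu_fault) is an assumption for
 * illustration, and the code is compiled out with #if 0.
 */
#if 0
static struct npu_context *example_stop_cb(struct npu_context *context,
					void *priv)
{
	/* Tell the GPU to stop issuing new translation requests */
	return context;
}

static int example_handle_gpu_fault(struct pci_dev *gpdev, uintptr_t ea,
					unsigned long flags)
{
	struct npu_context *context;
	unsigned long status;
	int rc;

	/* Caller must hold current->mm->mmap_sem in write mode */
	context = pnv_npu2_init_context(gpdev, 0, example_stop_cb, NULL);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* Fault in a single effective address on behalf of the GPU */
	rc = pnv_npu2_handle_fault(context, &ea, &flags, &status, 1);

	pnv_npu2_destroy_context(context, gpdev);
	return rc;
}
#endif
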
int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	uint64_t rc = 0;

	phb->npu.nmmu_flush =
		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers\n", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;
	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;

	return 0;
}