// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

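/*
 * Returns the number of host pages needed to back a shadow table of
 * @iommu_pages 64-bit TCE entries.
 */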
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

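/*
 * Returns the total locked-memory footprint of a table in pages: the
 * TCE pages themselves plus the pages holding the table descriptor
 * and its array of page pointers.
 */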
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

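/*
 * RCU callback that drops the IOMMU table reference and frees the
 * per-LIOBN binding structure.
 */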
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

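/*
 * kref release callback: unlinks the binding from the LIOBN list and
 * defers the actual freeing until after an RCU grace period so that
 * lockless walkers of stt->iommu_tables never see freed memory.
 */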
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

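/*
 * Called when an IOMMU group is detached from the VM: drops the
 * reference that kvm_spapr_tce_attach_iommu_group() took on every
 * hardware table of @grp that is linked to any registered LIOBN.
 */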
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
	}
}

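/*
 * Binds a hardware IOMMU table to a guest TCE table: @tablefd is the
 * anon fd returned by table creation, @grp the IOMMU group being
 * attached. Picks the first hardware window whose geometry is
 * compatible with the guest view and links it into stt->iommu_tables.
 */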
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
		    (tbltmp->it_offset << tbltmp->it_page_shift ==
		     stt->offset << stt->page_shift) &&
		    (tbltmp->it_size << tbltmp->it_page_shift >=
		     stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

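/*
 * RCU callback freeing a guest TCE table and whichever backing pages
 * were actually allocated for it.
 */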
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

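/*
 * Lazily allocates one backing page of the guest TCE table. The
 * recheck under stt->alloc_lock makes concurrent faulting and
 * H_PUT_TCE paths agree on a single page per slot.
 */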
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

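/*
 * Fault handler for userspace mmap() of the TCE table fd: hands out
 * the lazily allocated backing page for the faulting offset.
 */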
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

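/*
 * Final fput() of the TCE table fd: unlinks the table from the VM,
 * drops every remaining IOMMU table reference, returns the locked-vm
 * accounting and frees the table after an RCU grace period.
 */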
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	kvm_put_kvm(stt->kvm);

	account_locked_vm(current->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap           = kvm_spapr_tce_mmap,
	.release        = kvm_spapr_tce_release,
};

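/*
 * Handler for the KVM_CREATE_SPAPR_TCE{,_64} ioctls: validates the
 * requested window geometry, charges it against the locked-vm limit,
 * and returns an anon fd that userspace can mmap() to read the table.
 */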
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size = args->size;
	int ret = -ENOMEM;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
	    (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
fail_acct:
	account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
	return ret;
}

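/*
 * Translates the guest physical address carried in a TCE into a
 * userspace address, keeping the offset within the page while
 * dropping the TCE permission bits.
 */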
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

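/*
 * Validates a TCE before it is committed: the guest address must be
 * aligned and, for every attached hardware table, backed by memory
 * that userspace has preregistered with the IOMMU subsystem.
 */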
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

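/*
 * Clears one hardware TCE by exchanging it with an empty (DMA_NONE)
 * entry; used to roll back a partially applied guest request.
 */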
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
}

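/*
 * Drops the "mapped" count on the preregistered memory chunk backing
 * @entry and clears the stored userspace address.
 */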
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

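/*
 * Clears a single hardware TCE and, if it was mapped, releases the
 * corresponding preregistered-memory reference.
 */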
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

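/*
 * Unmaps one guest TCE entry; when the guest page is larger than the
 * hardware IOMMU page, this fans out over all covered hardware entries.
 */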
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

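/*
 * Maps one hardware TCE: resolves @ua through the preregistered
 * memory list, pins the chunk via its mapped counter, installs the
 * host physical address and remembers @ua for a later unmap.
 */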
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

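/*
 * Maps one guest TCE entry, fanning out over all hardware entries
 * covered by one guest page (the guest page size can exceed the
 * hardware IOMMU page size).
 */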
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

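/*
 * H_PUT_TCE hypercall: writes a single TCE at @ioba. The update is
 * applied to every attached hardware table and mirrored into the KVM
 * shadow table only if all of them succeed.
 */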
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		iommu_tce_kill(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

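/*
 * H_PUT_TCE_INDIRECT hypercall: reads up to 512 TCEs from a guest
 * page at @tce_list and applies them to consecutive entries starting
 * at @ioba. TCEs are validated up front, then re-read for the actual
 * update (see the comment below on why that is safe).
 */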
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code. If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto invalidate_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				/* Roll back the entry that just failed */
				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
						entry + i);
				goto invalidate_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, entry, npages);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

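/*
 * H_STUFF_TCE hypercall: fills @npages consecutive entries starting
 * at @ioba with the same @tce_value, which must carry no permission
 * bits (i.e. it can only clear or poison entries).
 */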
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE(1);
			/* Roll back the entry that just failed */
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);