/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

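/*
 * WARN_ON_ONCE_RM exists because the trap-based WARN_ON()/BUG machinery
 * is not safe to trigger from hypervisor real mode; this variant reports
 * via pr_err() and dump_stack() instead. A typical call site (matching
 * the uses further down) looks like:
 *
 *	if (WARN_ON_ONCE_RM(!rmap))
 *		return H_HARDWARE;
 */
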
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
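/* For example: 4K pages hold 512 eight-byte TCEs, 64K pages hold 8192. */
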
/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

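/*
 * The lockless list walk above is what allows this to run in real mode:
 * no locks are taken, on the assumption that table creation and removal
 * happen on the virtual-mode side (book3s_64_vio.c) in an RCU-safe way.
 */
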
/*
 * Validates TCE address.
 * At the moment flags and page mask are validated.
 * As the host kernel does not access those addresses (just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as TCE is a guest RAM address or the page
 * was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

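/*
 * For reference: a TCE value is a guest physical address with the
 * TCE_PCI_READ and TCE_PCI_WRITE permission bits OR'ed into its low bits;
 * iommu_tce_direction() maps those two bits to a dma_data_direction,
 * with both bits clear meaning DMA_NONE (a "poisoned" entry).
 */
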
/*
 * Note on the use of page_address() in real mode.
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(),
 * which returns __va(PFN_PHYS(page_to_pfn(page))): a purely arithmetic
 * operation that does not access the struct page.
 *
 * Theoretically page_address() could be defined differently, but
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

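/*
 * Worked example for the indexing above: with 64K backing pages
 * (TCES_PER_PAGE = 8192) and stt->offset = 0, idx = 10000 selects
 * stt->pages[1], slot 1808.
 */
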
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

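/*
 * The result above combines the memslot's userspace mapping of the guest
 * page with the offset within that page; the TCE permission bits are
 * masked out so a raw TCE value can be passed in as "gpa".
 */
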
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still see a valid UA here.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

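/*
 * "subpages" bridges different page sizes: for example, a 64K TCE page
 * (stt->page_shift == 16) backed by a 4K hardware IOMMU table
 * (it_page_shift == 12) turns one guest entry into 16 hardware entries.
 */
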
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
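
/*
 * To summarise the real-mode H_PUT_TCE path: validate ioba and the TCE,
 * translate the TCE's guest physical address to a userspace address,
 * update every hardware IOMMU table attached to this LIOBN, and finally
 * mirror the value into the guest-visible table. H_TOO_HARD at any point
 * punts the hypercall to the virtual-mode handler.
 */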

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and the current task there
	 * is not the hypervisor. This is also safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which in turn lets the page table walk
	 * below finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

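/*
 * Note that the checks above in practice force shift == PAGE_SHIFT, so
 * the two offset terms in the *phpa computation are redundant: both
 * reduce to the offset of "ua" within its page.
 */
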
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed fits in a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation
		 * does not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually a case of a guest with emulated devices only,
		 * when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock rmap and do __find_linux_pte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			/* Don't leak the rmap lock taken above */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			/* Clear the entry we actually tried to map */
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

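/*
 * Two ways of reaching the TCE list page are used above: the
 * pre-registered memory cache (the usual VFIO case), or a locked rmap
 * walk of the host page table (emulated devices). Either way, "tces"
 * ends up as a real-mode-dereferenceable address of the guest's 4K
 * list page.
 */
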
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			/* Clear the entry that failed, not the first one */
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

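/*
 * H_STUFF_TCE fills "npages" consecutive entries with the same value,
 * typically 0 to clear a range; the permission-bit check above limits it
 * to non-mapping (poison/debug) values.
 */
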
/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
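
/*
 * Per the hcall convention, H_GET_TCE returns its status in r3 and the
 * TCE value read in r4, which is why the result is written to
 * vcpu->arch.regs.gpr[4] above.
 */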

#endif /* KVM_BOOK3S_HV_POSSIBLE */