/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

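/*
 * WARN_ON_ONCE_RM() is a real-mode-safe stand-in for WARN_ON_ONCE(): the
 * regular trap-based WARN machinery is not safe to use with the MMU off,
 * so this variant reports the condition once via pr_err() and dump_stack()
 * and, like WARN_ON_ONCE(), evaluates to the condition.
 */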
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

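/* Number of 64-bit TCE entries that fit in one host page */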
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates TCE address.
 * At the moment only flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address
 * or whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), a purely arithmetic
 * operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * It cannot fail, so kvmppc_tce_validate() must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

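	/* idx arrives as an absolute TCE index; make it relative to this table's window offset */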
	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

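/*
 * Converts a guest physical address to a host userspace address via the
 * memslot that backs the guest page. Optionally returns a pointer to the
 * page's rmap entry so the real-mode handlers below can lock it.
 */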
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
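/*
 * Resets a hardware TCE to empty (DMA_NONE); used on failure paths to
 * leave the entry unmapped.
 */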
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

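/*
 * Drops the reference which a mapped entry holds on its preregistered
 * memory chunk and clears the cached userspace address for the entry.
 */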
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

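/*
 * Unmaps a single IOMMU page: exchanges the TCE with an empty one and,
 * if something was mapped there, releases the backing memory reference.
 */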
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}

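/*
 * Unmaps one guest TCE entry, which covers multiple IOMMU pages when the
 * guest page size is bigger than the host IOMMU page size.
 */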
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

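/*
 * Maps a single IOMMU page: looks up the userspace address in the
 * preregistered memory list, translates it to a host physical address,
 * takes a reference on the chunk and installs the TCE.
 */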
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

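/*
 * Maps one guest TCE entry, stepping through its IOMMU subpages and
 * advancing the userspace address by one IOMMU page per step.
 */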
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

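/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates every hardware IOMMU table attached to this LIOBN and mirrors
 * the value into the KVM copy of the table via kvmppc_tce_put().
 */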
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
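/*
 * Translates a userspace address to a host physical address by walking
 * the host page table in real mode; anything potentially complicated
 * (huge pages, pages not marked accessed) is punted with -EAGAIN.
 */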
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to primary thread will wait for the secondary
	 * to exit which will again result in the below page table walk
	 * to finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

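/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: applies a list of up to 512
 * TCEs read from a single guest page at tce_list, as H_PUT_TCE would do
 * for each of them.
 */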
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed resides in a single 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation does
		 * not depend on hpt.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

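		/* Real mode runs with the MMU off, so access the rmap via its physical address */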
		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			/* Exit via the label so the rmap lock is not leaked */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

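/*
 * Real-mode handler for H_STUFF_TCE: sets npages consecutive entries to
 * the same TCE value, which must not carry any permission bits.
 */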
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

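	/* As with other hcalls, the retrieved TCE value is returned to the guest in GPR4 */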
	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */