/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

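/*
 * WARN_ON_ONCE_RM() is a variant of WARN_ON_ONCE() intended for code that
 * runs in real mode, where the generic trap-based WARN machinery is avoided;
 * it prints the failed condition via pr_err() and dumps the stack at most
 * once per call site.
 */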
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison the TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/*
 * Note on the use of page_address() in real mode.
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(),
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is pure
 * arithmetic and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * It cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

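/*
 * Converts a guest physical address into the host userspace address of the
 * memory backing it, keeping the offset within the page but masking off the
 * TCE permission bits. When @prmap is not NULL (HV KVM), also returns a
 * pointer to the rmap entry for the guest page so the caller can take the
 * rmap lock.
 */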
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
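/*
 * Clears a TCE in the hardware table by exchanging it with an empty
 * (DMA_NONE) entry. Used as a last-resort cleanup when a real-mode update
 * has partially failed; the return value of the exchange is ignored.
 */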
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

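/*
 * Real-mode "mapped" reference drop: looks up the preregistered memory
 * region backing the userspace address stored in the it_userspace entry,
 * decrements its mapped counter and clears the stored userspace address.
 */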
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}

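/*
 * Clears a single TCE in the hardware table and, if it was mapped, drops
 * the "mapped" reference on the backing preregistered memory. Returns
 * H_TOO_HARD when the real-mode exchange cannot be done safely so that the
 * caller retries in virtual mode.
 */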
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}

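/*
 * Maps a guest TCE in real mode: finds the preregistered memory region for
 * the userspace address, takes a "mapped" reference on it, exchanges the new
 * HPA and direction into the hardware table (dropping the reference of any
 * previous mapping) and records the userspace address in it_userspace.
 * If the exchange fails, the reference is dropped and H_TOO_HARD asks the
 * caller to retry in virtual mode.
 */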
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return 0;
}

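/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates every hardware IOMMU table attached to this LIOBN, then stores
 * the guest TCE value via kvmppc_tce_put().
 */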
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

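/*
 * Translates a host userspace address to a host physical address by walking
 * the host page table in real mode. Anything that would be hard to handle
 * here (huge pages, pages not yet marked accessed) is refused with -EAGAIN
 * so the hypercall can be retried in virtual mode.
 */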
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which again lets the page table walk below
	 * finish first.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

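/*
 * Real-mode handler for the H_PUT_TCE_INDIRECT hypercall: translates the
 * guest page holding the TCE list (either via preregistered memory or a
 * real-mode page table walk under the rmap lock), then validates and maps
 * each TCE as H_PUT_TCE would.
 */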
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table being addressed fits within a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case where the gpa->hpa translation
		 * does not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock rmap and translate the list address with
		 * kvmppc_rm_ua_to_hpa().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

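/*
 * Real-mode handler for the H_STUFF_TCE hypercall: clears @npages entries
 * starting at @ioba in every attached hardware IOMMU table and stores
 * @tce_value in the corresponding entries of the KVM copy of the table.
 */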
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */