// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

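/*
 * Real-mode variant of WARN_ON_ONCE(): prints the condition, function and
 * line with pr_err() and dumps the stack, at most once per call site,
 * without going through the regular trap-based WARN machinery.
 */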
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
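/*
 * Converts the guest physical address embedded in a TCE to a host userspace
 * address, using the raw (real-mode safe) memslot array. Optionally returns
 * a pointer to the rmap entry of the guest page so the caller can lock it.
 */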
static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];

	return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison the TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/*
 * Note on the use of page_address() in real mode.
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), a purely arithmetic
 * operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently
 * but then either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	/*
	 * page must not be NULL in real mode,
	 * kvmppc_rm_ioba_validate() must have taken care of this.
	 */
	WARN_ON_ONCE_RM(!page);
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE table pages are allocated on demand, which kvmppc_rm_tce_put() cannot
 * do in real mode.
 * Check whether kvmppc_rm_tce_put() can succeed in real mode, i.e. the
 * needed TCE pages are already allocated or not required at all (when
 * clearing TCE entries).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
	/*
	 * clearing==true means kvmppc_rm_tce_put won't be allocating pages
	 * for empty tces.
	 */
	if (clearing)
		return H_SUCCESS;

	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}

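/*
 * Real-mode wrapper around the IOMMU exchange_rm() callback: installs the
 * new mapping at @entry and returns the previous one in @hpa/@direction.
 * If the previous entry allowed the device to write, the backing page is
 * marked dirty via the cached userspace address.
 */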
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still get a valid UA here.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

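/*
 * Clears one hardware TCE by exchanging it with an empty (DMA_NONE) entry;
 * used as a best-effort cleanup when a map or unmap request fails.
 */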
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

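/*
 * Drops the reference that kvmppc_rm_tce_iommu_do_map() took on the
 * preregistered memory region backing @entry and clears the cached
 * userspace address.
 */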
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

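/*
 * Unmaps a single hardware TCE: swaps in an empty entry and, if something
 * was mapped there, drops the reference on the preregistered memory.
 */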
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

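/*
 * Unmaps one guest TCE: a guest IOMMU page may span several hardware IOMMU
 * pages, so every corresponding hardware entry is unmapped.
 */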
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

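/*
 * Maps a single hardware TCE: translates the userspace address through the
 * preregistered memory list, takes a reference on that region, installs the
 * host physical address into the hardware table and caches the userspace
 * address for later unmapping.
 */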
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

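/*
 * Maps one guest TCE: as with unmapping, a guest IOMMU page may cover
 * multiple hardware IOMMU pages, so each of them is mapped in turn.
 */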
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

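/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * maps or unmaps the entry in every hardware table attached to this LIOBN
 * and stores the TCE value in the guest-visible table.
 */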
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

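/*
 * Translates a host userspace address to a host physical address by walking
 * the userspace page tables (vcpu->arch.pgdir). Returns an error for huge
 * or not-recently-referenced pages so the caller can punt the request back
 * to virtual mode.
 */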
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which in turn ensures the page table walk
	 * below has finished.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

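/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: reads a list of up to 512 TCEs
 * from guest memory and applies each of them as H_PUT_TCE would. The page
 * holding the list is reached either through preregistered memory (the VFIO
 * case) or by translating it directly while holding the rmap lock.
 */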
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits in a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and the gpa->hpa translation
		 * does not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock the rmap and do the page table walk in
		 * kvmppc_rm_ua_to_hpa().
		 */
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_TOO_HARD;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
			return H_PARAMETER;

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry);
				goto unlock_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

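/*
 * Real-mode handler for H_STUFF_TCE: writes the same TCE value (normally
 * zero, i.e. an invalid entry) into @npages consecutive entries, unmapping
 * the corresponding hardware TCEs.
 */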
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */