/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

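/*
 * One TCE is a u64, so a DMA window of @iommu_pages entries needs
 * ALIGN(iommu_pages * 8, PAGE_SIZE) bytes of backing pages: for example,
 * a 2GB window of 4K IOMMU pages is 524288 TCEs (4MB of TCEs), i.e.
 * 64 backing pages with a 64K PAGE_SIZE.
 */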
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

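/*
 * Account the pages backing a TCE table against the owning process'
 * RLIMIT_MEMLOCK (@inc selects charge vs uncharge). Fails with -ENOMEM
 * when the limit would be exceeded, unless the task has CAP_IPC_LOCK.
 */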
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
	long ret = 0;

	if (!current || !current->mm)
		return ret; /* process exited */

	down_write(&current->mm->mmap_sem);

	if (inc) {
		unsigned long locked, lock_limit;

		locked = current->mm->locked_vm + stt_pages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += stt_pages;
	} else {
		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
			stt_pages = current->mm->locked_vm;

		current->mm->locked_vm -= stt_pages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
			inc ? '+' : '-',
			stt_pages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

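/*
 * Called when an IOMMU group is detached from the VM: drop the KVM
 * reference to every hardware table of @grp that any registered
 * TCE table (LIOBN) is attached to.
 */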
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
	}
}

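/*
 * Attach a hardware IOMMU table of @grp to the KVM TCE table behind
 * @tablefd. The fd must be a TCE table fd previously created on this VM,
 * and @grp must expose a hardware table compatible with the guest window:
 * same bus offset, page size no larger than the guest's, and a window at
 * least as big.
 */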
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);

	kfree(stt);
}

static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	kvm_put_kvm(stt->kvm);

	kvmppc_account_memlimit(
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap           = kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

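/*
 * A minimal userspace sketch of creating a window through this handler
 * (reached via the KVM_CREATE_SPAPR_TCE_64 vm ioctl; the values below
 * are hypothetical):
 *
 *	struct kvm_create_spapr_tce_64 args = {
 *		.liobn = 0x80000001,	// guest bus identifier
 *		.page_shift = 16,	// 64K IOMMU pages
 *		.offset = 0,		// window start, in pages
 *		.size = 32768,		// 32768 * 64K = 2GB window
 *	};
 *	int fd = ioctl(vmfd, KVM_CREATE_SPAPR_TCE_64, &args);
 *
 * The returned fd exposes the guest view of the table via mmap() and
 * holds a reference on the VM until closed.
 */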
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size = args->size;
	int ret = -ENOMEM;
	int i;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

fail:
	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
fail_acct:
	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
	return ret;
}

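/*
 * Validate a guest TCE before it is written: the guest physical address
 * must be aligned to the window page size, translate to a registered
 * userspace address, and (for every attached hardware IOMMU table) be
 * backed by preregistered memory. DMA_NONE entries are always accepted
 * so userspace may poison the table for debugging.
 */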
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg(mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

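/*
 * Clear one hardware TCE: exchange it with an empty entry and, if it was
 * valid, drop the "mapped" count of the preregistered memory that backed
 * it. If that fails, the old entry is written back.
 */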
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

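/*
 * kvmppc_tce_iommu_unmap()/_map() fan one guest TCE out to a hardware
 * table: when the guest page size is larger than the hardware IOMMU page
 * size, a single guest entry covers "subpages" consecutive hardware
 * entries, e.g. a 64K guest page over a 4K hardware table updates 16
 * hardware TCEs.
 */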
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

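/*
 * H_PUT_TCE hcall: set a single TCE. The entry is validated, applied to
 * every hardware table attached to the LIOBN, then stored in the
 * guest-visible table.
 */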
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

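/*
 * H_PUT_TCE_INDIRECT hcall: set up to 512 consecutive TCEs, starting at
 * @ioba, from a 4K-aligned list page in guest memory.
 */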
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
			/* Exit via the common path to drop the SRCU lock */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

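/*
 * H_STUFF_TCE hcall: clear @npages consecutive entries, unmapping the
 * attached hardware tables and writing @tce_value (which must have no
 * permission bits set) to the guest-visible table.
 */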
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);