/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/                      mapping         iommu_          page
 *    | da      pa      va        (d)-(p)-(v)     function        type
 *  ---------------------------------------------------------------------------
 *  1 | c       c       c         1 - 1 - 1      _kmap() / _kunmap()      s
 *  2 | c       c,a     c         1 - 1 - 1      _kmalloc()/ _kfree()     s
 *  3 | c       d       c         1 - n - 1      _vmap() / _vunmap()      s
 *  4 | c       d,a     c         1 - n - 1      _vmalloc()/ _vfree()     n*
 *
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *      '*':    not yet, but feasible.
 */
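/*
 * Typical driver usage, as an illustrative sketch only: error handling
 * is elided, the "isp" iommu name and SZ_1M size are just examples, and
 * the handle is assumed to come from iommu_get() in the plat iommu API.
 *
 *      struct iommu *obj = iommu_get("isp");
 *      u32 da;
 *
 *      da = iommu_vmalloc(obj, 0, SZ_1M, 0);  (pattern 4; da picked by iovmm)
 *      ... access the buffer on the mpu side via da_to_va(obj, da) ...
 *      iommu_vfree(obj, da);
 *      iommu_put(obj);
 */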
59 | ||
60 | static struct kmem_cache *iovm_area_cachep; | |
61 | ||
62 | /* return total bytes of sg buffers */ | |
63 | static size_t sgtable_len(const struct sg_table *sgt) | |
64 | { | |
65 | unsigned int i, total = 0; | |
66 | struct scatterlist *sg; | |
67 | ||
68 | if (!sgt) | |
69 | return 0; | |
70 | ||
71 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
72 | size_t bytes; | |
73 | ||
74 | bytes = sg_dma_len(sg); | |
75 | ||
76 | if (!iopgsz_ok(bytes)) { | |
77 | pr_err("%s: sg[%d] not iommu pagesize(%x)\n", | |
78 | __func__, i, bytes); | |
79 | return 0; | |
80 | } | |
81 | ||
82 | total += bytes; | |
83 | } | |
84 | ||
85 | return total; | |
86 | } | |
87 | #define sgtable_ok(x) (!!sgtable_len(x)) | |
88 | ||
89 | /* | |
90 | * calculate the optimal number sg elements from total bytes based on | |
91 | * iommu superpages | |
92 | */ | |
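/*
 * For example (illustrative): a 17MB + 64KB request decomposes into one
 * 16MB superpage, one 1MB section and one 64KB large page, i.e. 3
 * entries, instead of the 4368 entries needed with 4KB pages alone.
 */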
static unsigned int sgtable_nents(size_t bytes)
{
        int i;
        unsigned int nr_entries;
        const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08zx\n", __func__, bytes);
                return 0;
        }

        nr_entries = 0;
        for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
                if (bytes >= pagesize[i]) {
                        nr_entries += (bytes / pagesize[i]);
                        bytes %= pagesize[i];
                }
        }
        BUG_ON(bytes);

        return nr_entries;
}
115 | ||
116 | /* allocate and initialize sg_table header(a kind of 'superblock') */ | |
117 | static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags) | |
118 | { | |
119 | unsigned int nr_entries; | |
120 | int err; | |
121 | struct sg_table *sgt; | |
122 | ||
123 | if (!bytes) | |
124 | return ERR_PTR(-EINVAL); | |
125 | ||
126 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) | |
127 | return ERR_PTR(-EINVAL); | |
128 | ||
129 | /* FIXME: IOVMF_DA_FIXED should support 'superpages' */ | |
130 | if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) { | |
131 | nr_entries = sgtable_nents(bytes); | |
132 | if (!nr_entries) | |
133 | return ERR_PTR(-EINVAL); | |
134 | } else | |
135 | nr_entries = bytes / PAGE_SIZE; | |
136 | ||
137 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | |
138 | if (!sgt) | |
139 | return ERR_PTR(-ENOMEM); | |
140 | ||
141 | err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); | |
142 | if (err) | |
143 | return ERR_PTR(err); | |
144 | ||
145 | pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); | |
146 | ||
147 | return sgt; | |
148 | } | |
149 | ||
150 | /* free sg_table header(a kind of superblock) */ | |
151 | static void sgtable_free(struct sg_table *sgt) | |
152 | { | |
153 | if (!sgt) | |
154 | return; | |
155 | ||
156 | sg_free_table(sgt); | |
157 | kfree(sgt); | |
158 | ||
159 | pr_debug("%s: sgt:%p\n", __func__, sgt); | |
160 | } | |
161 | ||
162 | /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ | |
163 | static void *vmap_sg(const struct sg_table *sgt) | |
164 | { | |
165 | u32 va; | |
166 | size_t total; | |
167 | unsigned int i; | |
168 | struct scatterlist *sg; | |
169 | struct vm_struct *new; | |
170 | const struct mem_type *mtype; | |
171 | ||
172 | mtype = get_mem_type(MT_DEVICE); | |
173 | if (!mtype) | |
174 | return ERR_PTR(-EINVAL); | |
175 | ||
176 | total = sgtable_len(sgt); | |
177 | if (!total) | |
178 | return ERR_PTR(-EINVAL); | |
179 | ||
180 | new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); | |
181 | if (!new) | |
182 | return ERR_PTR(-ENOMEM); | |
183 | va = (u32)new->addr; | |
184 | ||
185 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
186 | size_t bytes; | |
187 | u32 pa; | |
188 | int err; | |
189 | ||
190 | pa = sg_phys(sg); | |
191 | bytes = sg_dma_len(sg); | |
192 | ||
193 | BUG_ON(bytes != PAGE_SIZE); | |
194 | ||
195 | err = ioremap_page(va, pa, mtype); | |
196 | if (err) | |
197 | goto err_out; | |
198 | ||
199 | va += bytes; | |
200 | } | |
201 | ||
6716bd06 SP |
202 | flush_cache_vmap((unsigned long)new->addr, |
203 | (unsigned long)(new->addr + total)); | |
69d3a84a HD |
204 | return new->addr; |
205 | ||
206 | err_out: | |
207 | WARN_ON(1); /* FIXME: cleanup some mpu mappings */ | |
208 | vunmap(new->addr); | |
209 | return ERR_PTR(-EAGAIN); | |
210 | } | |
211 | ||
212 | static inline void vunmap_sg(const void *va) | |
213 | { | |
214 | vunmap(va); | |
215 | } | |
216 | ||
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and length
 * in iovmas mmap, and returns the newly allocated iovma.
 */
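/*
 * For example (illustrative): with existing iovmas [0x1000,0x10000) and
 * [0x100000,0x200000), an IOVMF_DA_ANON request for 0x20000 bytes walks
 * the sorted list and settles into the hole at da 0x10000, since
 * 0x10000 + 0x20000 fits below the next area's da_start of 0x100000.
 */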
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (flags & IOVMF_DA_ANON) {
                /*
                 * Reserve the first page for NULL
                 */
                start = PAGE_SIZE;
                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if ((prev_end <= start) && (start + bytes < tmp->da_start))
                        goto found;

                if (flags & IOVMF_DA_ANON)
                        start = roundup(tmp->da_end, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}
344 | ||
345 | /** | |
346 | * da_to_va - convert (d) to (v) | |
347 | * @obj: objective iommu | |
348 | * @da: iommu device virtual address | |
349 | * @va: mpu virtual address | |
350 | * | |
351 | * Returns mpu virtual addr which corresponds to a given device virtual addr | |
352 | */ | |
353 | void *da_to_va(struct iommu *obj, u32 da) | |
354 | { | |
355 | void *va = NULL; | |
356 | struct iovm_struct *area; | |
357 | ||
358 | mutex_lock(&obj->mmap_lock); | |
359 | ||
360 | area = __find_iovm_area(obj, da); | |
361 | if (!area) { | |
362 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | |
363 | goto out; | |
364 | } | |
365 | va = area->va; | |
69d3a84a | 366 | out: |
26548900 DW |
367 | mutex_unlock(&obj->mmap_lock); |
368 | ||
69d3a84a HD |
369 | return va; |
370 | } | |
371 | EXPORT_SYMBOL_GPL(da_to_va); | |
372 | ||
373 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | |
374 | { | |
375 | unsigned int i; | |
376 | struct scatterlist *sg; | |
377 | void *va = _va; | |
378 | void *va_end; | |
379 | ||
380 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
381 | struct page *pg; | |
382 | const size_t bytes = PAGE_SIZE; | |
383 | ||
384 | /* | |
385 | * iommu 'superpage' isn't supported with 'iommu_vmalloc()' | |
386 | */ | |
387 | pg = vmalloc_to_page(va); | |
388 | BUG_ON(!pg); | |
389 | sg_set_page(sg, pg, bytes, 0); | |
390 | ||
391 | va += bytes; | |
392 | } | |
393 | ||
394 | va_end = _va + PAGE_SIZE * i; | |
6716bd06 | 395 | flush_cache_vmap((unsigned long)_va, (unsigned long)va_end); |
69d3a84a HD |
396 | } |
397 | ||
398 | static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | |
399 | { | |
400 | /* | |
401 | * Actually this is not necessary at all, just exists for | |
ba6a1179 | 402 | * consistency of the code readability. |
69d3a84a HD |
403 | */ |
404 | BUG_ON(!sgt); | |
405 | } | |
406 | ||
407 | static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len) | |
408 | { | |
409 | unsigned int i; | |
410 | struct scatterlist *sg; | |
411 | void *va; | |
412 | ||
413 | va = phys_to_virt(pa); | |
414 | ||
415 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
416 | size_t bytes; | |
417 | ||
418 | bytes = iopgsz_max(len); | |
419 | ||
420 | BUG_ON(!iopgsz_ok(bytes)); | |
421 | ||
422 | sg_set_buf(sg, phys_to_virt(pa), bytes); | |
423 | /* | |
424 | * 'pa' is cotinuous(linear). | |
425 | */ | |
426 | pa += bytes; | |
427 | len -= bytes; | |
428 | } | |
429 | BUG_ON(len); | |
430 | ||
431 | clean_dcache_area(va, len); | |
432 | } | |
433 | ||
434 | static inline void sgtable_drain_kmalloc(struct sg_table *sgt) | |
435 | { | |
436 | /* | |
437 | * Actually this is not necessary at all, just exists for | |
ba6a1179 | 438 | * consistency of the code readability |
69d3a84a HD |
439 | */ |
440 | BUG_ON(!sgt); | |
441 | } | |
442 | ||
443 | /* create 'da' <-> 'pa' mapping from 'sgt' */ | |
444 | static int map_iovm_area(struct iommu *obj, struct iovm_struct *new, | |
445 | const struct sg_table *sgt, u32 flags) | |
446 | { | |
447 | int err; | |
448 | unsigned int i, j; | |
449 | struct scatterlist *sg; | |
450 | u32 da = new->da_start; | |
451 | ||
452 | if (!obj || !new || !sgt) | |
453 | return -EINVAL; | |
454 | ||
455 | BUG_ON(!sgtable_ok(sgt)); | |
456 | ||
457 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
458 | u32 pa; | |
459 | int pgsz; | |
460 | size_t bytes; | |
461 | struct iotlb_entry e; | |
462 | ||
463 | pa = sg_phys(sg); | |
464 | bytes = sg_dma_len(sg); | |
465 | ||
466 | flags &= ~IOVMF_PGSZ_MASK; | |
467 | pgsz = bytes_to_iopgsz(bytes); | |
468 | if (pgsz < 0) | |
469 | goto err_out; | |
470 | flags |= pgsz; | |
471 | ||
472 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | |
473 | i, da, pa, bytes); | |
474 | ||
475 | iotlb_init_entry(&e, da, pa, flags); | |
476 | err = iopgtable_store_entry(obj, &e); | |
477 | if (err) | |
478 | goto err_out; | |
479 | ||
480 | da += bytes; | |
481 | } | |
482 | return 0; | |
483 | ||
484 | err_out: | |
485 | da = new->da_start; | |
486 | ||
487 | for_each_sg(sgt->sgl, sg, i, j) { | |
488 | size_t bytes; | |
489 | ||
490 | bytes = iopgtable_clear_entry(obj, da); | |
491 | ||
492 | BUG_ON(!iopgsz_ok(bytes)); | |
493 | ||
494 | da += bytes; | |
495 | } | |
496 | return err; | |
497 | } | |
498 | ||
499 | /* release 'da' <-> 'pa' mapping */ | |
500 | static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area) | |
501 | { | |
502 | u32 start; | |
503 | size_t total = area->da_end - area->da_start; | |
504 | ||
505 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); | |
506 | ||
507 | start = area->da_start; | |
508 | while (total > 0) { | |
509 | size_t bytes; | |
510 | ||
511 | bytes = iopgtable_clear_entry(obj, start); | |
512 | if (bytes == 0) | |
513 | bytes = PAGE_SIZE; | |
514 | else | |
515 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", | |
516 | __func__, start, bytes, area->flags); | |
517 | ||
518 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | |
519 | ||
520 | total -= bytes; | |
521 | start += bytes; | |
522 | } | |
523 | BUG_ON(total); | |
524 | } | |
525 | ||
526 | /* template function for all unmapping */ | |
527 | static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, | |
528 | void (*fn)(const void *), u32 flags) | |
529 | { | |
530 | struct sg_table *sgt = NULL; | |
531 | struct iovm_struct *area; | |
532 | ||
533 | if (!IS_ALIGNED(da, PAGE_SIZE)) { | |
534 | dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); | |
535 | return NULL; | |
536 | } | |
537 | ||
538 | mutex_lock(&obj->mmap_lock); | |
539 | ||
540 | area = __find_iovm_area(obj, da); | |
541 | if (!area) { | |
542 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | |
543 | goto out; | |
544 | } | |
545 | ||
546 | if ((area->flags & flags) != flags) { | |
547 | dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, | |
548 | area->flags); | |
549 | goto out; | |
550 | } | |
551 | sgt = (struct sg_table *)area->sgt; | |
552 | ||
553 | unmap_iovm_area(obj, area); | |
554 | ||
555 | fn(area->va); | |
556 | ||
557 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, | |
558 | area->da_start, da, area->da_end, | |
559 | area->da_end - area->da_start, area->flags); | |
560 | ||
561 | free_iovm_area(obj, area); | |
562 | out: | |
563 | mutex_unlock(&obj->mmap_lock); | |
564 | ||
565 | return sgt; | |
566 | } | |
567 | ||
568 | static u32 map_iommu_region(struct iommu *obj, u32 da, | |
569 | const struct sg_table *sgt, void *va, size_t bytes, u32 flags) | |
570 | { | |
571 | int err = -ENOMEM; | |
572 | struct iovm_struct *new; | |
573 | ||
574 | mutex_lock(&obj->mmap_lock); | |
575 | ||
576 | new = alloc_iovm_area(obj, da, bytes, flags); | |
577 | if (IS_ERR(new)) { | |
578 | err = PTR_ERR(new); | |
579 | goto err_alloc_iovma; | |
580 | } | |
581 | new->va = va; | |
582 | new->sgt = sgt; | |
583 | ||
584 | if (map_iovm_area(obj, new, sgt, new->flags)) | |
585 | goto err_map; | |
586 | ||
587 | mutex_unlock(&obj->mmap_lock); | |
588 | ||
589 | dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", | |
590 | __func__, new->da_start, bytes, new->flags, va); | |
591 | ||
592 | return new->da_start; | |
593 | ||
594 | err_map: | |
595 | free_iovm_area(obj, new); | |
596 | err_alloc_iovma: | |
597 | mutex_unlock(&obj->mmap_lock); | |
598 | return err; | |
599 | } | |
600 | ||
601 | static inline u32 __iommu_vmap(struct iommu *obj, u32 da, | |
602 | const struct sg_table *sgt, void *va, size_t bytes, u32 flags) | |
603 | { | |
604 | return map_iommu_region(obj, da, sgt, va, bytes, flags); | |
605 | } | |
606 | ||
607 | /** | |
608 | * iommu_vmap - (d)-(p)-(v) address mapper | |
609 | * @obj: objective iommu | |
610 | * @sgt: address of scatter gather table | |
611 | * @flags: iovma and page property | |
612 | * | |
613 | * Creates 1-n-1 mapping with given @sgt and returns @da. | |
614 | * All @sgt element must be io page size aligned. | |
615 | */ | |
616 | u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, | |
617 | u32 flags) | |
618 | { | |
619 | size_t bytes; | |
620 | void *va; | |
621 | ||
622 | if (!obj || !obj->dev || !sgt) | |
623 | return -EINVAL; | |
624 | ||
625 | bytes = sgtable_len(sgt); | |
626 | if (!bytes) | |
627 | return -EINVAL; | |
628 | bytes = PAGE_ALIGN(bytes); | |
629 | ||
630 | va = vmap_sg(sgt); | |
631 | if (IS_ERR(va)) | |
632 | return PTR_ERR(va); | |
633 | ||
634 | flags &= IOVMF_HW_MASK; | |
635 | flags |= IOVMF_DISCONT; | |
636 | flags |= IOVMF_MMIO; | |
637 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
638 | ||
639 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | |
640 | if (IS_ERR_VALUE(da)) | |
641 | vunmap_sg(va); | |
642 | ||
643 | return da; | |
644 | } | |
645 | EXPORT_SYMBOL_GPL(iommu_vmap); | |
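/*
 * Illustrative caller-side sketch only (not from this file): the sg_table
 * passed to iommu_vmap() is built by the caller, with every element sized
 * to an iommu page size, e.g. for two physically discontiguous 4KB pages
 * (page0/page1 are hypothetical struct page pointers):
 *
 *      struct sg_table sgt;
 *      struct scatterlist *sg;
 *
 *      sg_alloc_table(&sgt, 2, GFP_KERNEL);
 *      sg = sgt.sgl;
 *      sg_set_page(sg, page0, PAGE_SIZE, 0);
 *      sg = sg_next(sg);
 *      sg_set_page(sg, page1, PAGE_SIZE, 0);
 *
 *      da = iommu_vmap(obj, 0, &sgt, 0);
 */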
646 | ||
647 | /** | |
648 | * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' | |
649 | * @obj: objective iommu | |
650 | * @da: iommu device virtual address | |
651 | * | |
652 | * Free the iommu virtually contiguous memory area starting at | |
653 | * @da, which was returned by 'iommu_vmap()'. | |
654 | */ | |
655 | struct sg_table *iommu_vunmap(struct iommu *obj, u32 da) | |
656 | { | |
657 | struct sg_table *sgt; | |
658 | /* | |
659 | * 'sgt' is allocated before 'iommu_vmalloc()' is called. | |
660 | * Just returns 'sgt' to the caller to free | |
661 | */ | |
662 | sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); | |
663 | if (!sgt) | |
664 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
665 | return sgt; | |
666 | } | |
667 | EXPORT_SYMBOL_GPL(iommu_vunmap); | |
668 | ||
669 | /** | |
670 | * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper | |
671 | * @obj: objective iommu | |
672 | * @da: contiguous iommu virtual memory | |
673 | * @bytes: allocation size | |
674 | * @flags: iovma and page property | |
675 | * | |
676 | * Allocate @bytes linearly and creates 1-n-1 mapping and returns | |
677 | * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set. | |
678 | */ | |
679 | u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | |
680 | { | |
681 | void *va; | |
682 | struct sg_table *sgt; | |
683 | ||
684 | if (!obj || !obj->dev || !bytes) | |
685 | return -EINVAL; | |
686 | ||
687 | bytes = PAGE_ALIGN(bytes); | |
688 | ||
689 | va = vmalloc(bytes); | |
690 | if (!va) | |
691 | return -ENOMEM; | |
692 | ||
693 | sgt = sgtable_alloc(bytes, flags); | |
694 | if (IS_ERR(sgt)) { | |
695 | da = PTR_ERR(sgt); | |
696 | goto err_sgt_alloc; | |
697 | } | |
698 | sgtable_fill_vmalloc(sgt, va); | |
699 | ||
700 | flags &= IOVMF_HW_MASK; | |
701 | flags |= IOVMF_DISCONT; | |
702 | flags |= IOVMF_ALLOC; | |
703 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
704 | ||
705 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | |
706 | if (IS_ERR_VALUE(da)) | |
707 | goto err_iommu_vmap; | |
708 | ||
709 | return da; | |
710 | ||
711 | err_iommu_vmap: | |
712 | sgtable_drain_vmalloc(sgt); | |
713 | sgtable_free(sgt); | |
714 | err_sgt_alloc: | |
715 | vfree(va); | |
716 | return da; | |
717 | } | |
718 | EXPORT_SYMBOL_GPL(iommu_vmalloc); | |
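/*
 * Note (illustrative): passing a non-zero @da requests a fixed device
 * address, e.g.
 *
 *      da = iommu_vmalloc(obj, 0x20000000, SZ_64K, 0);
 *
 * which selects IOVMF_DA_FIXED internally, while da == 0 lets the
 * allocator pick a free hole (IOVMF_DA_ANON). The 0x20000000 value is
 * just an example; a fixed da must be page aligned and unoccupied.
 */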
719 | ||
720 | /** | |
721 | * iommu_vfree - release memory allocated by 'iommu_vmalloc()' | |
722 | * @obj: objective iommu | |
723 | * @da: iommu device virtual address | |
724 | * | |
725 | * Frees the iommu virtually continuous memory area starting at | |
726 | * @da, as obtained from 'iommu_vmalloc()'. | |
727 | */ | |
728 | void iommu_vfree(struct iommu *obj, const u32 da) | |
729 | { | |
730 | struct sg_table *sgt; | |
731 | ||
732 | sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); | |
733 | if (!sgt) | |
734 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
735 | sgtable_free(sgt); | |
736 | } | |
737 | EXPORT_SYMBOL_GPL(iommu_vfree); | |
738 | ||
739 | static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va, | |
740 | size_t bytes, u32 flags) | |
741 | { | |
742 | struct sg_table *sgt; | |
743 | ||
744 | sgt = sgtable_alloc(bytes, flags); | |
745 | if (IS_ERR(sgt)) | |
746 | return PTR_ERR(sgt); | |
747 | ||
748 | sgtable_fill_kmalloc(sgt, pa, bytes); | |
749 | ||
750 | da = map_iommu_region(obj, da, sgt, va, bytes, flags); | |
751 | if (IS_ERR_VALUE(da)) { | |
752 | sgtable_drain_kmalloc(sgt); | |
753 | sgtable_free(sgt); | |
754 | } | |
755 | ||
756 | return da; | |
757 | } | |
758 | ||
759 | /** | |
760 | * iommu_kmap - (d)-(p)-(v) address mapper | |
761 | * @obj: objective iommu | |
762 | * @da: contiguous iommu virtual memory | |
763 | * @pa: contiguous physical memory | |
764 | * @flags: iovma and page property | |
765 | * | |
766 | * Creates 1-1-1 mapping and returns @da again, which can be | |
767 | * adjusted if 'IOVMF_DA_ANON' is set. | |
768 | */ | |
769 | u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, | |
770 | u32 flags) | |
771 | { | |
772 | void *va; | |
773 | ||
774 | if (!obj || !obj->dev || !bytes) | |
775 | return -EINVAL; | |
776 | ||
777 | bytes = PAGE_ALIGN(bytes); | |
778 | ||
779 | va = ioremap(pa, bytes); | |
780 | if (!va) | |
781 | return -ENOMEM; | |
782 | ||
783 | flags &= IOVMF_HW_MASK; | |
784 | flags |= IOVMF_LINEAR; | |
785 | flags |= IOVMF_MMIO; | |
786 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
787 | ||
788 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | |
789 | if (IS_ERR_VALUE(da)) | |
790 | iounmap(va); | |
791 | ||
792 | return da; | |
793 | } | |
794 | EXPORT_SYMBOL_GPL(iommu_kmap); | |
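/*
 * Illustrative use only: iommu_kmap() suits a physically contiguous
 * region whose address is already known, e.g. a carved-out framebuffer
 * (fb_paddr below is a hypothetical example value):
 *
 *      u32 da = iommu_kmap(obj, 0, fb_paddr, SZ_1M, 0);
 *      ...
 *      iommu_kunmap(obj, da);
 */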
795 | ||
796 | /** | |
797 | * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' | |
798 | * @obj: objective iommu | |
799 | * @da: iommu device virtual address | |
800 | * | |
801 | * Frees the iommu virtually contiguous memory area starting at | |
802 | * @da, which was passed to and was returned by'iommu_kmap()'. | |
803 | */ | |
804 | void iommu_kunmap(struct iommu *obj, u32 da) | |
805 | { | |
806 | struct sg_table *sgt; | |
807 | typedef void (*func_t)(const void *); | |
808 | ||
809 | sgt = unmap_vm_area(obj, da, (func_t)__iounmap, | |
810 | IOVMF_LINEAR | IOVMF_MMIO); | |
811 | if (!sgt) | |
812 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
813 | sgtable_free(sgt); | |
814 | } | |
815 | EXPORT_SYMBOL_GPL(iommu_kunmap); | |
816 | ||
817 | /** | |
818 | * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper | |
819 | * @obj: objective iommu | |
820 | * @da: contiguous iommu virtual memory | |
821 | * @bytes: bytes for allocation | |
822 | * @flags: iovma and page property | |
823 | * | |
824 | * Allocate @bytes linearly and creates 1-1-1 mapping and returns | |
825 | * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set. | |
826 | */ | |
827 | u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | |
828 | { | |
829 | void *va; | |
830 | u32 pa; | |
831 | ||
832 | if (!obj || !obj->dev || !bytes) | |
833 | return -EINVAL; | |
834 | ||
835 | bytes = PAGE_ALIGN(bytes); | |
836 | ||
837 | va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); | |
838 | if (!va) | |
839 | return -ENOMEM; | |
840 | pa = virt_to_phys(va); | |
841 | ||
842 | flags &= IOVMF_HW_MASK; | |
843 | flags |= IOVMF_LINEAR; | |
844 | flags |= IOVMF_ALLOC; | |
845 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
846 | ||
847 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | |
848 | if (IS_ERR_VALUE(da)) | |
849 | kfree(va); | |
850 | ||
851 | return da; | |
852 | } | |
853 | EXPORT_SYMBOL_GPL(iommu_kmalloc); | |
854 | ||
855 | /** | |
856 | * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' | |
857 | * @obj: objective iommu | |
858 | * @da: iommu device virtual address | |
859 | * | |
860 | * Frees the iommu virtually contiguous memory area starting at | |
861 | * @da, which was passed to and was returned by'iommu_kmalloc()'. | |
862 | */ | |
863 | void iommu_kfree(struct iommu *obj, u32 da) | |
864 | { | |
865 | struct sg_table *sgt; | |
866 | ||
867 | sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); | |
868 | if (!sgt) | |
869 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
870 | sgtable_free(sgt); | |
871 | } | |
872 | EXPORT_SYMBOL_GPL(iommu_kfree); | |
873 | ||
874 | ||
875 | static int __init iovmm_init(void) | |
876 | { | |
877 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | |
878 | struct kmem_cache *p; | |
879 | ||
880 | p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, | |
881 | flags, NULL); | |
882 | if (!p) | |
883 | return -ENOMEM; | |
884 | iovm_area_cachep = p; | |
885 | ||
886 | return 0; | |
887 | } | |
888 | module_init(iovmm_init); | |
889 | ||
890 | static void __exit iovmm_exit(void) | |
891 | { | |
892 | kmem_cache_destroy(iovm_area_cachep); | |
893 | } | |
894 | module_exit(iovmm_exit); | |
895 | ||
896 | MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); | |
897 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | |
898 | MODULE_LICENSE("GPL v2"); |