/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

/*
 * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
 *
 * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
 */
#define IOVMF_SW_SHIFT		16

/*
 * iovma: h/w flags derived from cam and ram attributes
 */
#define IOVMF_CAM_MASK		(~((1 << 10) - 1))
#define IOVMF_RAM_MASK		(~IOVMF_CAM_MASK)

#define IOVMF_PGSZ_MASK		(3 << 0)
#define IOVMF_PGSZ_1M		MMU_CAM_PGSZ_1M
#define IOVMF_PGSZ_64K		MMU_CAM_PGSZ_64K
#define IOVMF_PGSZ_4K		MMU_CAM_PGSZ_4K
#define IOVMF_PGSZ_16M		MMU_CAM_PGSZ_16M

#define IOVMF_ENDIAN_MASK	(1 << 9)
#define IOVMF_ENDIAN_BIG	MMU_RAM_ENDIAN_BIG

#define IOVMF_ELSZ_MASK		(3 << 7)
#define IOVMF_ELSZ_16		MMU_RAM_ELSZ_16
#define IOVMF_ELSZ_32		MMU_RAM_ELSZ_32
#define IOVMF_ELSZ_NONE		MMU_RAM_ELSZ_NONE

#define IOVMF_MIXED_MASK	(1 << 6)
#define IOVMF_MIXED		MMU_RAM_MIXED

/*
 * iovma: s/w flags, used internally for mapping and unmapping.
 */
#define IOVMF_MMIO		(1 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC		(2 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC_MASK	(3 << IOVMF_SW_SHIFT)

/* "superpages" are supported just with physically linear pages */
#define IOVMF_DISCONT		(1 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR		(2 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR_MASK	(3 << (2 + IOVMF_SW_SHIFT))

#define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))

static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in an sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
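
/*
 * Worked example (sketch): max_alignment(0x12300000) finds the address
 * is not 16MB-aligned (0x12300000 & (SZ_16M - 1) != 0) but is
 * 1MB-aligned, so it returns SZ_1M; an address with no 4KB alignment
 * falls through the whole table and returns 0.
 */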

/*
 * calculate the optimal number of sg elements from total bytes,
 * based on iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08zx\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
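
/*
 * Worked example (sketch): for da = pa = 0 and bytes = 17MB, the first
 * pass picks a 16MB entry (both addresses are 16MB-aligned and
 * iopgsz_max(17MB) is 16MB), the second pass picks a 1MB entry for the
 * remainder, so sgtable_nents() returns 2.
 */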

/* allocate and initialize an sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%u entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
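
/*
 * E.g. (sketch): sgtable_alloc(SZ_1M, IOVMF_LINEAR, da, pa) with da and
 * pa 1MB-aligned yields a single-entry table (one 1MB superpage), while
 * the same size without IOVMF_LINEAR yields SZ_1M / PAGE_SIZE = 256
 * page-sized entries.
 */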

/* free an sg_table header (a kind of 'superblock') */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in the iovmas mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
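
/*
 * Worked example (sketch, assuming obj->da_start is 0): with iovmas
 * [0x10000, 0x20000) and [0x40000, 0x50000) on the list and no
 * IOVMF_DA_FIXED, a request for 0x10000 bytes is bumped past the first
 * area to roundup(0x20001, PAGE_SIZE) = 0x21000; the hole before
 * 0x40000 is big enough, so the new iovma becomes [0x21000, 0x31000).
 */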

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given
 * iommu device virtual addr
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with
		 * 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all; it just exists for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0) {
			err = -EINVAL;
			goto err_out;
		}

		pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
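
/*
 * Usage sketch (hypothetical caller; assumes @domain is already set up
 * and attached to @dev, and @sgt describes io-pagesize-aligned
 * buffers). Passing 0 for @da without IOVMF_DA_FIXED lets
 * alloc_iovm_area() pick the device address:
 *
 *	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	sgt = omap_iommu_vunmap(domain, dev, da);
 */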

/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free.
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
		   size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
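
/*
 * Usage sketch (hypothetical caller): allocate a 1MB device-visible
 * buffer, letting the allocator choose the device address, then
 * release it:
 *
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	omap_iommu_vfree(domain, dev, da);
 */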

/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");