/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include "iopgtable.h"

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture-specific iommu functions
 * @ops:	a pointer to architecture-specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among
 * the omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture-specific iommu functions
 * @ops:	a pointer to architecture-specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
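
/*
 * Illustrative sketch (not part of the original file): an architecture
 * layer would fill in a struct iommu_functions and register it at init
 * time.  The omap2_* callback names and the version value below are
 * hypothetical placeholders; only members actually dereferenced through
 * arch_iommu in this file are shown.
 */
#if 0	/* example only, never compiled */
static const struct iommu_functions omap2_iommu_ops = {
	.version	= 2,			/* hypothetical value */
	.enable		= omap2_iommu_enable,
	.disable	= omap2_iommu_disable,
	.fault_isr	= omap2_iommu_fault_isr,
	.tlb_read_cr	= omap2_tlb_read_cr,
	.tlb_load_cr	= omap2_tlb_load_cr,
	.cr_to_e	= omap2_cr_to_e,
	.cr_to_virt	= omap2_cr_to_virt,
	.cr_valid	= omap2_cr_valid,
	.alloc_cr	= omap2_alloc_cr,
	.get_pte_attr	= omap2_get_pte_attr,
	/* save_ctx, restore_ctx, dump_cr and dump_ctx omitted for brevity */
};

static int __init omap2_iommu_arch_init(void)
{
	return install_iommu_arch(&omap2_iommu_ops);
}

static void __exit omap2_iommu_arch_exit(void)
{
	uninstall_iommu_arch(&omap2_iommu_ops);
}
#endif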

/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 *	TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int i;
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		for (i = l.base; i < obj->nr_tlb_entries; i++) {
			struct cr_regs tmp;

			iotlb_lock_get(obj, &l);
			l.vict = i;
			iotlb_lock_set(obj, &l);
			iotlb_read_cr(obj, &tmp);
			if (!iotlb_cr_valid(&tmp))
				break;
		}
		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);
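
/*
 * Illustrative sketch (not part of the original file): loading a single
 * 4KB translation straight into the TLB.  The field names follow the
 * uses of struct iotlb_entry in this file; the addresses are arbitrary
 * examples and further fields may need to be set for the arch helpers.
 */
#if 0	/* example only, never compiled */
static int example_load_tlb(struct iommu *obj)
{
	struct iotlb_entry e = {
		.da	= 0x10000000,		/* device virtual address */
		.pa	= 0x80001000,		/* physical address */
		.pgsz	= MMU_CAM_PGSZ_4K,	/* one small page */
		.prsvd	= 0,			/* not a preserved entry */
	};

	return load_iotlb_entry(obj, &e);
}
#endif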

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	struct iotlb_lock l;
	int i;

	clk_enable(obj->clk);

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs cr;
		u32 start;
		size_t bytes;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &cr);
		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);

/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @obj:	target iommu
 * @start:	iommu device virtual address (start)
 * @end:	iommu device virtual address (end)
 *
 * Clear the iommu tlb entries covering the range from 'start' to 'end'.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page sizes */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved, l;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &saved);
	memcpy(&l, &saved, sizeof(saved));

	for (i = 0; i < num; i++) {
		struct cr_regs tmp;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &tmp);
		if (!iotlb_cr_valid(&tmp))
			continue;

		*p++ = tmp;
	}
	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (*iopgd & IOPGD_TABLE)
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (*iopgd & IOPGD_TABLE) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
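
/*
 * Illustrative sketch (not part of the original file): mapping one 4KB
 * page through the pagetable API and tearing it down again.  The
 * addresses are arbitrary examples; iopgtable_clear_entry() returns the
 * number of bytes that were unmapped.
 */
#if 0	/* example only, never compiled */
static int example_map_page(struct iommu *obj, u32 da, u32 pa)
{
	struct iotlb_entry e = {
		.da	= da,
		.pa	= pa,
		.pgsz	= MMU_CAM_PGSZ_4K,
	};
	int err;

	err = iopgtable_store_entry(obj, &e);
	if (err)
		return err;

	/* ... device uses the da -> pa translation here ... */

	iopgtable_clear_entry(obj, da);
	return 0;
}
#endif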

static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (*iopgd & IOPGD_TABLE)
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	/* Dynamic loading TLB or PTE */
	if (obj->isr)
		err = obj->isr(obj);

	if (!err)
		return IRQ_HANDLED;

	clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
			da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_get - Get iommu handler
 * @name:	target iommu name
 **/
struct iommu *iommu_get(const char *name)
{
	int err = -ENOMEM;
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	mutex_lock(&obj->iommu_lock);

	if (obj->refcount++ == 0) {
		err = iommu_enable(obj);
		if (err)
			goto err_enable;
		flush_iotlb_all(obj);
	}

	if (!try_module_get(obj->owner))
		goto err_module;

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	mutex_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iommu_get);

/**
 * iommu_put - Put back iommu handler
 * @obj:	target iommu
 **/
void iommu_put(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	mutex_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
EXPORT_SYMBOL_GPL(iommu_put);
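
/*
 * Illustrative sketch (not part of the original file): a client driver
 * acquiring an iommu by name, using it, and releasing it again.  The
 * name "isp" and the addresses are hypothetical examples.
 */
#if 0	/* example only, never compiled */
static int example_client(void)
{
	struct iommu *obj;
	int err;

	obj = iommu_get("isp");
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* map/unmap via iopgtable_store_entry()/iopgtable_clear_entry() */
	err = example_map_page(obj, 0x10000000, 0x80001000);

	iommu_put(obj);
	return err;
}
#endif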

/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);
	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");