/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include "iopgtable.h"

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture-specific iommu functions
 * @ops:	a pointer to architecture-specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

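/*
 * Example (illustrative sketch only, not part of this driver): an
 * architecture backend fills in a struct iommu_functions and registers
 * it once at init time. The callback names and version value below are
 * hypothetical placeholders.
 *
 *	static const struct iommu_functions omap2_iommu_ops = {
 *		.version	= 2,
 *		.enable		= omap2_iommu_enable,
 *		.disable	= omap2_iommu_disable,
 *		.fault_isr	= omap2_iommu_fault_isr,
 *	};
 *
 *	static int __init omap2_iommu_init(void)
 *	{
 *		return install_iommu_arch(&omap2_iommu_ops);
 *	}
 */
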
/**
 * uninstall_iommu_arch - Uninstall architecture-specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);

/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 *	TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);

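/*
 * Example (sketch, hypothetical handle and addresses): pinning a 4KB
 * mapping in the TLB as a preserved entry. Depending on the arch
 * backend, further attributes (endianness, element size) may be needed.
 *
 *	struct iotlb_entry e = {
 *		.da	= 0x40000000,
 *		.pa	= 0x88000000,
 *		.pgsz	= MMU_CAM_PGSZ_4K,
 *		.prsvd	= 1,
 *		.valid	= 1,
 *	};
 *
 *	if (load_iotlb_entry(obj, &e))
 *		dev_err(obj->dev, "failed to load TLB entry\n");
 */
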
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);

/**
 * flush_iotlb_range - Clear iommu tlb entries in a range
 * @obj:	target iommu
 * @start:	iommu device virtual address (start)
 * @end:	iommu device virtual address (end)
 *
 * Clear each iommu tlb entry which falls within the [start, end) range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);

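/*
 * Example (sketch, hypothetical range): after unmapping a 1MB region
 * starting at da, drop any stale TLB entries covering it:
 *
 *	flush_iotlb_range(obj, da, da + SZ_1M);
 */
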
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 * @bytes:	buffer size in bytes
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

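/*
 * Example (sketch, hypothetical values): creating a 1MB section mapping
 * in the pagetable. Both da and pa must be aligned to the section size,
 * or iopgd_alloc_section() above rejects the entry.
 *
 *	struct iotlb_entry e = {
 *		.da	= 0x40100000,
 *		.pa	= 0x88100000,
 *		.pgsz	= MMU_CAM_PGSZ_1M,
 *		.valid	= 1,
 *	};
 *	int err = iopgtable_store_entry(obj, &e);
 */
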
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (*iopgd & IOPGD_TABLE)
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);

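/*
 * Example (sketch): checking how a device address is currently mapped.
 * A non-NULL pte means da is covered by a second-level (4KB/64KB)
 * entry; otherwise a non-zero *pgd indicates a first-level (1MB/16MB)
 * mapping.
 *
 *	u32 *pgd, *pte;
 *
 *	iopgtable_lookup_entry(obj, da, &pgd, &pte);
 *	if (pte)
 *		dev_dbg(obj->dev, "pte mapping: %08x\n", *pte);
 *	else
 *		dev_dbg(obj->dev, "pgd mapping: %08x\n", *pgd);
 */
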
static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (*iopgd & IOPGD_TABLE) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);

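/*
 * Example (sketch): undoing the section mapping made in the example
 * above. The return value is the number of bytes unmapped, 0 if no
 * mapping covered da.
 *
 *	size_t bytes = iopgtable_clear_entry(obj, 0x40100000);
 */
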
static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (*iopgd & IOPGD_TABLE)
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	/* Dynamic loading TLB or PTE */
	if (obj->isr)
		err = obj->isr(obj);

	if (!err)
		return IRQ_HANDLED;

	clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
			da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_get - Get iommu handle
 * @name:	target iommu name
 **/
struct iommu *iommu_get(const char *name)
{
	int err = -ENOMEM;
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	mutex_lock(&obj->iommu_lock);

	if (obj->refcount++ == 0) {
		err = iommu_enable(obj);
		if (err)
			goto err_enable;
		flush_iotlb_all(obj);
	}

	if (!try_module_get(obj->owner))
		goto err_module;

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	mutex_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iommu_get);

/**
 * iommu_put - Put back iommu handle
 * @obj:	target iommu
 **/
void iommu_put(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	mutex_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
EXPORT_SYMBOL_GPL(iommu_put);

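/*
 * Example (sketch): a typical client lifecycle. The iommu name "isp" is
 * hypothetical; real names come from the board's platform data.
 *
 *	struct iommu *obj = iommu_get("isp");
 *
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	... map and unmap with iopgtable_store_entry() and
 *	iopgtable_clear_entry(), then release the handle:
 *
 *	iommu_put(obj);
 */
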
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);
	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");