/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>

#define IOMMU_LARGE_ALLOC	15
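/*
 * Allocations of more than IOMMU_LARGE_ALLOC pages are treated as "large"
 * and, when a large pool exists, are carved out of it rather than out of
 * the per-pool arenas (see iommu_tbl_range_alloc()).
 */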

/*
 * Initialize iommu_pool entries for the iommu_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than IOMMU_LARGE_ALLOC pages.
 */
extern void iommu_tbl_pool_init(struct iommu_table *iommu,
				unsigned long num_entries,
				u32 page_table_shift,
				const struct iommu_tbl_ops *iommu_tbl_ops,
				bool large_pool, u32 npools)
{
	unsigned int start, i;
	struct iommu_pool *p = &(iommu->large_pool);

	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;
	BUG_ON(npools > IOMMU_NR_POOLS);

	iommu->page_table_shift = page_table_shift;
	iommu->iommu_tbl_ops = iommu_tbl_ops;
	start = 0;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
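	/*
	 * With a large pool, the per-pool arenas share the bottom 3/4 of the
	 * table and the large pool takes whatever remains at the top (its end
	 * is set to num_entries below).
	 */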
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->arena_pool[i].lock));
		iommu->arena_pool[i].start = start;
		iommu->arena_pool[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->arena_pool[i].end = start - 1;
	}

	/* initialize large_pool */
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
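
/*
 * Illustrative call (sketch only, not part of this file): a caller with a
 * 64K-entry table of 8K IOMMU pages and no large pool might do
 *
 *	iommu_tbl_pool_init(&my_iommu, 1UL << 16, 13, &my_tbl_ops, false, 0);
 *
 * my_iommu and my_tbl_ops stand in for caller-provided objects; passing
 * npools == 0 selects the default IOMMU_NR_POOLS.
 */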

unsigned long iommu_tbl_range_alloc(struct device *dev,
				struct iommu_table *iommu,
				unsigned long npages,
				unsigned long *handle,
				unsigned int pool_hash)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *arena;
	int pass = 0;
	unsigned int pool_nr;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > IOMMU_LARGE_ALLOC);
	unsigned long shift;

	/* Sanity check */
	if (unlikely(npages == 0)) {
		printk_ratelimited("npages == 0\n");
		return DMA_ERROR_CODE;
	}

	if (largealloc) {
		arena = &(iommu->large_pool);
		spin_lock_irqsave(&arena->lock, flags);
		pool_nr = 0; /* to keep compiler happy */
	} else {
		/* pick out pool_nr */
		pool_nr = pool_hash & (npools - 1);
		arena = &(iommu->arena_pool[pool_nr]);
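		/*
		 * The (npools - 1) mask above assumes the pool count is a
		 * power of two, as IOMMU_NR_POOLS is expected to be.
		 */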

		/* find first available unlocked pool */
		while (!spin_trylock_irqsave(&(arena->lock), flags)) {
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			arena = &(iommu->arena_pool[pool_nr]);
		}
	}

 again:
	if (pass == 0 && handle && *handle &&
	    (*handle >= arena->start) && (*handle < arena->end))
		start = *handle;
	else
		start = arena->hint;

	limit = arena->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = arena->start;
		if (iommu->iommu_tbl_ops->reset != NULL)
			iommu->iommu_tbl_ops->reset(iommu);
	}

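	/*
	 * The ->reset hook used above is optional; it gives the backend a
	 * chance to flush any cached IOMMU state (e.g. a flush queue) before
	 * the search wraps around and entries at the bottom of the pool are
	 * reused.
	 */
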
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << iommu->page_table_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << iommu->page_table_shift);

	shift = iommu->page_table_map_base >> iommu->page_table_shift;
	boundary_size = boundary_size >> iommu->page_table_shift;
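	/*
	 * boundary_size is now in units of IOMMU pages and shift is the table
	 * index corresponding to page_table_map_base, so the segment-boundary
	 * check inside iommu_area_alloc() effectively operates on bus
	 * addresses rather than raw table indices.
	 */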
	/*
	 * if the iommu has a non-trivial cookie <-> index mapping, we set
	 * things up so that iommu_is_span_boundary() merely checks if the
	 * (index + npages) < num_tsb_entries
	 */
	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	}
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, 0);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure, rescan from the beginning. */
			arena->hint = arena->start;
			if (iommu->iommu_tbl_ops->reset != NULL)
				iommu->iommu_tbl_ops->reset(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(arena->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			arena = &(iommu->arena_pool[pool_nr]);
			while (!spin_trylock(&(arena->lock))) {
				pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
				arena = &(iommu->arena_pool[pool_nr]);
			}
			arena->hint = arena->start;
			pass++;
			goto again;
		} else {
			/* give up */
			spin_unlock_irqrestore(&(arena->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;
	spin_unlock_irqrestore(&(arena->lock), flags);

	return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
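
/*
 * On success iommu_tbl_range_alloc() returns the index of the first table
 * entry allocated; with the default (identity) cookie mapping a caller
 * would typically form the DMA cookie as
 * page_table_map_base + (n << page_table_shift). On failure it returns
 * DMA_ERROR_CODE.
 */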

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

	/* The large pool is the last pool at the top of the table */
	if (large_pool && entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->arena_pool[pool_nr];
	}
	return p;
}

void iommu_tbl_range_free(struct iommu_table *iommu, u64 dma_addr,
			  unsigned long npages, bool do_demap, void *demap_arg)
{
	unsigned long entry;
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->page_table_shift;

	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
		entry = (*iommu->iommu_tbl_ops->cookie_to_index)(dma_addr,
								 demap_arg);
	} else {
		entry = (dma_addr - iommu->page_table_map_base) >> shift;
	}
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	if (do_demap && iommu->iommu_tbl_ops->demap != NULL)
		(*iommu->iommu_tbl_ops->demap)(demap_arg, entry, npages);

	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
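
/*
 * Illustrative alloc/free pairing (caller-side sketch, not part of this
 * file; my_iommu, hash and demap_arg stand in for caller state):
 *
 *	n = iommu_tbl_range_alloc(dev, &my_iommu, npages, &handle, hash);
 *	if (n == DMA_ERROR_CODE)
 *		return -ENOMEM;
 *	dma_addr = my_iommu.page_table_map_base +
 *		   (n << my_iommu.page_table_shift);
 *	...
 *	iommu_tbl_range_free(&my_iommu, dma_addr, npages, true, demap_arg);
 *
 * Passing true for do_demap asks the ->demap hook to run before the bitmap
 * entries are cleared.
 */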