/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

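/*
 * IOMMU fault handler: just log the faulting address and access flags;
 * no attempt is made to recover the translation.
 */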
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
	unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

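/*
 * Map each scatterlist segment at consecutive device addresses starting
 * at @iova.  If any segment fails to map, unmap everything mapped so far
 * and return the error, leaving the domain unchanged.
 */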
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

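/*
 * Tear down the mappings created by etnaviv_iommu_map(), walking the
 * same scatterlist segment by segment.
 */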
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

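/*
 * Remove a mapping from the MMU address space: unmap the object's pages
 * and give its range back to the drm_mm allocator.
 */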
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

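/*
 * Allocate a GPU virtual address range for @etnaviv_obj and map its
 * pages there.  A contiguous buffer may be usable directly via its
 * physical address, with no page-table mapping at all; anything else
 * gets a drm_mm node, evicting inactive mappings via the drm_mm scan
 * API when the address space is fragmented or full.
 *
 * Typical call sequence (a sketch only; the caller and the memory_base
 * argument shown here are assumptions, not part of this file):
 *
 *	mapping->use = 1;
 *	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj,
 *				    gpu->memory_base, mapping);
 */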
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_vram_mapping *free = NULL;
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;
	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
			DRM_MM_SEARCH_DEFAULT);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the get_iova finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed.  Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}

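/*
 * Drop a GEM object's mapping: tear down the page-table entries if the
 * object actually occupied a drm_mm node (the contiguous fast path
 * never does), then unlink it from the mapping list.
 */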
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}

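/*
 * Create the etnaviv MMU context for @gpu on top of an existing IOMMU
 * @domain: the drm_mm manager covers the domain's aperture, and the
 * fault handler above is installed for diagnostics.
 */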
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain, enum etnaviv_iommu_version version)
{
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->domain = domain;
	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
		    domain->geometry.aperture_end -
		      domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}

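/*
 * The dump helpers reach the version-specific etnaviv_iommu_ops, which
 * embed the generic iommu_ops, to size and fill a snapshot of the MMU
 * page tables for debugging.
 */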
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}