drm/etnaviv: add cmdbuf suballocator
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

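/*
 * Map a scatter/gather list into the GPU address space starting at @iova,
 * entry by entry.  If any entry fails to map, everything mapped so far is
 * torn down again before the error is returned.
 */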
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

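/*
 * Remove the MMU entries for a scatter/gather list previously mapped at
 * @iova.  Every entry is expected to be page aligned; the walk is aborted
 * if less than the expected size was unmapped.
 */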
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

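/*
 * Find a free IOVA range of @size bytes for @node; must be called with
 * mmu->lock held.  The search starts above last_iova so the address space
 * is filled upwards.  On -ENOSPC the search is retried from the start of
 * the aperture, and if that still fails, unpinned mappings are evicted
 * through the drm_mm scan API until enough room has been freed up.
 */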
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		/*
		 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
		 * drm_mm into giving out a low IOVA after address space
		 * rollover. This needs a proper fix.
		 */
		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			size, 0, mmu->last_iova, ~0UL,
			mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, size, 0, 0);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

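/*
 * Map a GEM object into the GPU address space and add it to the MMU's list
 * of mappings.  Contiguous buffers on MMUv1 that fit below 2GiB are used
 * directly through their physical address offset from memory_base instead
 * of taking up IOVA space.
 */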
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);

	return ret;
}

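/*
 * Drop a GEM mapping: unmap the pages if the object actually occupies a
 * node in the MMU address space, then take it off the mapping list.
 */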
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}

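/*
 * Allocate and set up the etnaviv_iommu context for @gpu: pick the v1 or v2
 * page table format based on the MMU_VERSION minor feature bit, and size
 * the drm_mm range manager after the domain's aperture.
 */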
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
		    mmu->domain->geometry.aperture_end -
		    mmu->domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

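/*
 * Resolve a GPU-visible address for a kernel-owned buffer such as the cmdbuf
 * suballocator pool.  On MMUv1 this is simply the physical address relative
 * to memory_base; on MMUv2 an IOVA range is reserved and a read-only mapping
 * is installed.
 *
 * A caller would look roughly like the sketch below (illustrative only; the
 * suballoc_* names and SUBALLOC_SIZE are placeholders, not the actual cmdbuf
 * code):
 *
 *	ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc_paddr,
 *					    &suballoc_vram_node,
 *					    SUBALLOC_SIZE, &suballoc_iova);
 *	if (ret)
 *		return ret;
 */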
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
				  struct drm_mm_node *vram_node, size_t size,
				  u32 *iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		*iova = paddr - gpu->memory_base;
		return 0;
	} else {
		int ret;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return ret;
		}
		ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
				IOMMU_READ);
		if (ret < 0) {
			drm_mm_remove_node(vram_node);
			mutex_unlock(&mmu->lock);
			return ret;
		}
		mmu->last_iova = vram_node->start + size;
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		*iova = (u32)vram_node->start;
		return 0;
	}
}

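/*
 * Release a suballocator mapping obtained above.  Only MMUv2 has anything
 * to undo, as MMUv1 addresses are derived directly from the physical
 * address.
 */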
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
				   struct drm_mm_node *vram_node, size_t size,
				   u32 iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2) {
		mutex_lock(&mmu->lock);
		iommu_unmap(mmu->domain, iova, size);
		drm_mm_remove_node(vram_node);
		mutex_unlock(&mmu->lock);
	}
}

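/*
 * Dumping of the MMU page tables, deferred to the version specific
 * etnaviv_iommu_ops wrapped around the generic iommu_ops.
 */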
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}