/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

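/*
 * IOMMU fault callback, registered via iommu_set_fault_handler() in
 * etnaviv_iommu_new() below.  It only logs the faulting address;
 * returning 0 tells the IOMMU core that the fault has been handled.
 */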
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
	unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

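/*
 * Map @sgt into GPU address space at @iova.  Each scatterlist entry is
 * mapped with iommu_map(); if any entry fails, everything mapped so far
 * is unmapped again before returning the error.
 */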
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* map from the start of the page containing the buffer */
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

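/*
 * Unmap @sgt from GPU address space starting at @iova, walking the
 * scatterlist entries the same way etnaviv_iommu_map() mapped them.
 */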
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		/* a partial unmap would leave stale entries behind */
		if (unmapped < bytes)
			return -EINVAL;

		VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

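/*
 * Unmap the object's pages from the page tables and return the drm_mm
 * node to the range manager.  Called with mmu->lock held by both
 * callers.
 */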
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

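/*
 * Find GPU address space for @etnaviv_obj and map it there.  Contiguous
 * buffers (single scatterlist entry) whose address lands in the first
 * 2 GiB above @memory_base can be used directly through the linear
 * window without any page table setup, unless ETNA_BO_FORCE_MMU insists
 * on a real MMU mapping.  Everything else gets a node from the drm_mm
 * range manager, evicting unused mappings when the address space is
 * full.  The caller must hold etnaviv_obj->lock, as asserted below;
 * mmu->lock is taken internally.
 */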
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_vram_mapping *free = NULL;
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;
	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
			DRM_MM_SEARCH_DEFAULT);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

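		/*
		 * drm_mm eviction scan: feed candidate blocks to
		 * drm_mm_scan_add_block() until it reports that evicting
		 * them would open up a large enough hole for this
		 * allocation.
		 */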
		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent get_iova from finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed.  Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}

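/*
 * Undo etnaviv_iommu_map_gem().  Only mappings that actually went
 * through the page tables own a drm_mm node; linear window mappings
 * just need to be taken off the mappings list.
 */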
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}

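/*
 * Tear down the MMU context.  drm_mm_takedown() will complain if any
 * nodes are still allocated, so all mappings must have been removed by
 * this point.
 */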
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}

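/*
 * Create an etnaviv MMU context on top of @domain.  The drm_mm range
 * manager is set up to span the whole aperture of the domain, and the
 * fault handler above is registered for fault diagnostics.
 */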
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain, enum etnaviv_iommu_version version)
{
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->domain = domain;
	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
		    domain->geometry.aperture_end -
		    domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}

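/*
 * The etnaviv IOMMU implementations embed the generic iommu_ops inside
 * struct etnaviv_iommu_ops, so container_of() on domain->ops recovers
 * the etnaviv specific dump_size()/dump() callbacks for dumping the
 * page tables.
 */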
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}