/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
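/*
 * Lazily attach backing pages on first use: shmem pages via
 * drm_gem_get_pages(), or a VRAM carveout allocation when there is no
 * IOMMU.  Also builds the sg table used to map the buffer for DMA.
 */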
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, the pages were dma-mapped at
		 * get_pages() time; unmap them again before release:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
/* Update madvise status, returns true if not purged, else false. */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, since we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
776 | ||
777 | void msm_gem_free_object(struct drm_gem_object *obj) | |
778 | { | |
779 | struct drm_device *dev = obj->dev; | |
780 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | |
c8afe684 RC |
781 | |
782 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | |
783 | ||
7198e6b0 RC |
784 | /* object should not be on active list: */ |
785 | WARN_ON(is_active(msm_obj)); | |
786 | ||
c8afe684 RC |
787 | list_del(&msm_obj->mm_list); |
788 | ||
0e08270a SS |
789 | mutex_lock(&msm_obj->lock); |
790 | ||
4fe5f65e | 791 | put_iova(obj); |
c8afe684 | 792 | |
05b84911 RC |
793 | if (obj->import_attach) { |
794 | if (msm_obj->vaddr) | |
795 | dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); | |
796 | ||
797 | /* Don't drop the pages for imported dmabuf, as they are not | |
798 | * ours, just free the array we allocated: | |
799 | */ | |
800 | if (msm_obj->pages) | |
2098105e | 801 | kvfree(msm_obj->pages); |
c8afe684 | 802 | |
f28730c8 | 803 | drm_prime_gem_destroy(obj, msm_obj->sgt); |
05b84911 | 804 | } else { |
0e08270a | 805 | msm_gem_vunmap_locked(obj); |
05b84911 RC |
806 | put_pages(obj); |
807 | } | |
c8afe684 | 808 | |
7198e6b0 RC |
809 | if (msm_obj->resv == &msm_obj->_resv) |
810 | reservation_object_fini(msm_obj->resv); | |
811 | ||
c8afe684 RC |
812 | drm_gem_object_release(obj); |
813 | ||
0e08270a | 814 | mutex_unlock(&msm_obj->lock); |
c8afe684 RC |
815 | kfree(msm_obj); |
816 | } | |
817 | ||
818 | /* convenience method to construct a GEM buffer object, and userspace handle */ | |
819 | int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, | |
820 | uint32_t size, uint32_t flags, uint32_t *handle) | |
821 | { | |
822 | struct drm_gem_object *obj; | |
823 | int ret; | |
824 | ||
c8afe684 RC |
825 | obj = msm_gem_new(dev, size, flags); |
826 | ||
c8afe684 RC |
827 | if (IS_ERR(obj)) |
828 | return PTR_ERR(obj); | |
829 | ||
830 | ret = drm_gem_handle_create(file, obj, handle); | |
831 | ||
832 | /* drop reference from allocate - handle holds it now */ | |
833 | drm_gem_object_unreference_unlocked(obj); | |
834 | ||
835 | return ret; | |
836 | } | |
837 | ||
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
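/*
 * Convenience wrapper for kernel-internal buffers: allocate a GEM
 * object, optionally pin an iova in the given address space, and map
 * it into the kernel.
 */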
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}