// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
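
/*
 * Usage sketch (illustrative, not taken from a real driver): a driver
 * typically pulls in these helpers via the DRM_GEM_SHMEM_DRIVER_OPS
 * convenience macro from <drm/drm_gem_shmem_helper.h>, which wires up
 * dumb-buffer creation and PRIME import on top of shmem objects. The
 * "foo" names below are placeholders:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */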

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	/*
	 * Our buffers are kept pinned, so allocating them
	 * from the MOVABLE zone is a really bad idea, and
	 * conflicts with CMA. See comments above new_inode()
	 * why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
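
/*
 * Example (illustrative only): allocate a 4 MiB object and drop the
 * reference again. Errors follow the ERR_PTR() convention documented
 * above.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, SZ_4M);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *	...
 *	drm_gem_object_put(&shmem->base);
 */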

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		shmem->pages_use_count--;
		drm_prime_gem_destroy(obj, shmem->sgt);
		kvfree(shmem->pages);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
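
/*
 * The get/put pair is reference counted: only the first get allocates the
 * pages array and only the last put frees it; nested calls just adjust
 * pages_use_count under pages_lock. A minimal sketch of holding pages
 * across a hardware operation (foo_hw_run() is a hypothetical driver
 * function):
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *	foo_hw_run(shmem->pages);
 *	drm_gem_shmem_put_pages(shmem);
 */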

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach) {
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		if (!shmem->map_cached)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
	}

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * The kernel virtual address on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
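
/*
 * Example (illustrative only): CPU access through a temporary kernel
 * mapping. The vmap/vunmap pair is use-counted, so nested mappings are
 * cheap and the mapping only goes away on the last vunmap:
 *
 *	void *vaddr = drm_gem_shmem_vmap(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->size);
 *	drm_gem_shmem_vunmap(obj, vaddr);
 */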

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Kernel virtual address of the mapping
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it
 * can also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

/**
 * drm_gem_shmem_create_with_handle - Allocate an object with the given size
 *				       and attach a handle to it
 * @file_priv: DRM file structure for the handle
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Returned handle for the object
 *
 * This function creates a shmem GEM object and registers a handle for it in
 * @file_priv. The reference taken at allocation is dropped again; afterwards
 * the object is kept alive only by the handle.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table; the handle is the id that
	 * userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
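
/*
 * Illustrative use from a driver's buffer-allocation ioctl (the "args"
 * structure is hypothetical): only the handle is returned to userspace,
 * since the helper has already dropped the kernel-side reference:
 *
 *	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
 *						 &args->handle);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *	return 0;
 */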

/**
 * drm_gem_shmem_madvise - Update the madvise status of a shmem GEM object
 * @obj: GEM object
 * @madv: New madvise value
 *
 * This function updates the madvise status unless the object has already been
 * purged.
 *
 * Returns:
 * True if the object has not been purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
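
/*
 * The madv values follow the convention of &drm_gem_shmem_object.madv:
 * 0 means active/in use, positive values are driver-specific and mark the
 * object as purgeable, and a negative value means the backing pages have
 * been purged. A driver madvise ioctl might do (illustrative sketch,
 * "args" and foo_add_to_shrinker_list() are hypothetical):
 *
 *	drm_gem_shmem_madvise(obj, args->dont_need ? 1 : 0);
 *	if (drm_gem_shmem_is_purgeable(to_drm_gem_shmem_obj(obj)))
 *		foo_add_to_shrinker_list(obj);
 */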

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much memory as possible back to
	 * the system, as we are called from the OOM path. To do this we
	 * must instruct shmemfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
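
/*
 * Purging is designed to be called from reclaim, e.g. a driver shrinker's
 * scan callback. A sketch (the driver list "foo->shrinker_list" is
 * hypothetical and assumed to be protected by a driver lock) that skips
 * contended objects instead of blocking, since drm_gem_shmem_purge()
 * returns false when pages_lock cannot be taken:
 *
 *	list_for_each_entry(shmem, &foo->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */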

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
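
/*
 * Worked example: for a 1920x1080 XRGB8888 dumb buffer (bpp = 32),
 * min_pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and size =
 * 7680 * 1080 = 8294400 bytes, which drm_gem_shmem_create() then rounds
 * up to a whole number of pages.
 */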

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (!shmem->map_cached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *				 pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly, instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				  scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides any differences between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	shmem->sgt = sgt;

	return sgt;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
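
/*
 * Typical driver-side use (illustrative): fetch the dma-mapped table once
 * and program the hardware from its entries; the table stays cached in
 * shmem->sgt until the object is freed or purged. foo_hw_map() is a
 * hypothetical driver function:
 *
 *	struct scatterlist *sg;
 *	struct sg_table *sgt;
 *	int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		foo_hw_map(sg_dma_address(sg), sg_dma_len(sg));
 */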

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *					  another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	size_t npages = size >> PAGE_SHIFT;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!shmem->pages) {
		ret = -ENOMEM;
		goto err_free_gem;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
	if (ret < 0)
		goto err_free_array;

	shmem->sgt = sgt;
	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;

err_free_array:
	kvfree(shmem->pages);
err_free_gem:
	drm_gem_object_put(&shmem->base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);