drm: Integrate VRAM MM into struct drm_device
[linux-2.6-block.git] / drivers / gpu / drm / drm_gem_vram_helper.c
CommitLineData
85438a8d
TZ
1// SPDX-License-Identifier: GPL-2.0-or-later
2
3#include <drm/drm_gem_vram_helper.h>
59f5989a 4#include <drm/drm_device.h>
fed1eec0 5#include <drm/drm_mode.h>
1f460b49 6#include <drm/drm_prime.h>
5c9dcacf 7#include <drm/drm_vram_mm_helper.h>
85438a8d
TZ
8#include <drm/ttm/ttm_page_alloc.h>
9
/**
 * DOC: overview
 *
 * This library provides a GEM buffer object that is backed by video RAM
 * (VRAM). It can be used for framebuffer devices with dedicated memory.
 */
16
17/*
18 * Buffer-objects helpers
19 */
20
21static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
22{
23 /* We got here via ttm_bo_put(), which means that the
24 * TTM buffer object in 'bo' has already been cleaned
25 * up; only release the GEM object.
26 */
27 drm_gem_object_release(&gbo->gem);
28}
29
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	/* Final teardown: release the embedded GEM object, then free
	 * the containing structure.
	 */
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}
35
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	/* TTM destroy callback; 'bo' is embedded in a GEM VRAM object. */
	drm_gem_vram_destroy(drm_gem_vram_of_bo(bo));
}
42
43static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
44 unsigned long pl_flag)
45{
46 unsigned int i;
47 unsigned int c = 0;
48
49 gbo->placement.placement = gbo->placements;
50 gbo->placement.busy_placement = gbo->placements;
51
52 if (pl_flag & TTM_PL_FLAG_VRAM)
53 gbo->placements[c++].flags = TTM_PL_FLAG_WC |
54 TTM_PL_FLAG_UNCACHED |
55 TTM_PL_FLAG_VRAM;
56
57 if (pl_flag & TTM_PL_FLAG_SYSTEM)
58 gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
59 TTM_PL_FLAG_SYSTEM;
60
61 if (!c)
62 gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
63 TTM_PL_FLAG_SYSTEM;
64
65 gbo->placement.num_placement = c;
66 gbo->placement.num_busy_placement = c;
67
68 for (i = 0; i < c; ++i) {
69 gbo->placements[i].fpfn = 0;
70 gbo->placements[i].lpfn = 0;
71 }
72}
73
74static int drm_gem_vram_init(struct drm_device *dev,
75 struct ttm_bo_device *bdev,
76 struct drm_gem_vram_object *gbo,
77 size_t size, unsigned long pg_align,
78 bool interruptible)
79{
80 int ret;
81 size_t acc_size;
82
83 ret = drm_gem_object_init(dev, &gbo->gem, size);
84 if (ret)
85 return ret;
86
87 acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
88
89 gbo->bo.bdev = bdev;
90 drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
91
92 ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
93 &gbo->placement, pg_align, interruptible, acc_size,
94 NULL, NULL, ttm_buffer_object_destroy);
95 if (ret)
96 goto err_drm_gem_object_release;
97
98 return 0;
99
100err_drm_gem_object_release:
101 drm_gem_object_release(&gbo->gem);
102 return ret;
103}
104
105/**
106 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
107 * @dev: the DRM device
108 * @bdev: the TTM BO device backing the object
109 * @size: the buffer size in bytes
110 * @pg_align: the buffer's alignment in multiples of the page size
111 * @interruptible: sleep interruptible if waiting for memory
112 *
113 * Returns:
114 * A new instance of &struct drm_gem_vram_object on success, or
115 * an ERR_PTR()-encoded error code otherwise.
116 */
117struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
118 struct ttm_bo_device *bdev,
119 size_t size,
120 unsigned long pg_align,
121 bool interruptible)
122{
123 struct drm_gem_vram_object *gbo;
124 int ret;
125
126 gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
127 if (!gbo)
128 return ERR_PTR(-ENOMEM);
129
130 ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
131 if (ret < 0)
132 goto err_kfree;
133
134 return gbo;
135
136err_kfree:
137 kfree(gbo);
138 return ERR_PTR(ret);
139}
140EXPORT_SYMBOL(drm_gem_vram_create);
141
142/**
143 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
144 * @gbo: the GEM VRAM object
145 *
146 * See ttm_bo_put() for more information.
147 */
148void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
149{
150 ttm_bo_put(&gbo->bo);
151}
152EXPORT_SYMBOL(drm_gem_vram_put);
153
154/**
155 * drm_gem_vram_reserve() - Reserves a VRAM-backed GEM object
156 * @gbo: the GEM VRAM object
157 * @no_wait: don't wait for buffer object to become available
158 *
159 * See ttm_bo_reserve() for more information.
160 *
161 * Returns:
162 * 0 on success, or
163 * a negative error code otherwise
164 */
165int drm_gem_vram_reserve(struct drm_gem_vram_object *gbo, bool no_wait)
166{
167 return ttm_bo_reserve(&gbo->bo, true, no_wait, NULL);
168}
169EXPORT_SYMBOL(drm_gem_vram_reserve);
170
171/**
172 * drm_gem_vram_unreserve() - \
173 Release a reservation acquired by drm_gem_vram_reserve()
174 * @gbo: the GEM VRAM object
175 *
176 * See ttm_bo_unreserve() for more information.
177 */
178void drm_gem_vram_unreserve(struct drm_gem_vram_object *gbo)
179{
180 ttm_bo_unreserve(&gbo->bo);
181}
182EXPORT_SYMBOL(drm_gem_vram_unreserve);
183
184/**
185 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
186 * @gbo: the GEM VRAM object
187 *
188 * See drm_vma_node_offset_addr() for more information.
189 *
190 * Returns:
191 * The buffer object's offset for userspace mappings on success, or
192 * 0 if no offset is allocated.
193 */
194u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
195{
196 return drm_vma_node_offset_addr(&gbo->bo.vma_node);
197}
198EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
199
200/**
201 * drm_gem_vram_offset() - \
202 Returns a GEM VRAM object's offset in video memory
203 * @gbo: the GEM VRAM object
204 *
205 * This function returns the buffer object's offset in the device's video
206 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
207 *
208 * Returns:
209 * The buffer object's offset in video memory on success, or
210 * a negative errno code otherwise.
211 */
212s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
213{
214 if (WARN_ON_ONCE(!gbo->pin_count))
215 return (s64)-ENODEV;
216 return gbo->bo.offset;
217}
218EXPORT_SYMBOL(drm_gem_vram_offset);
219
220/**
221 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
222 * @gbo: the GEM VRAM object
223 * @pl_flag: a bitmask of possible memory regions
224 *
225 * Pinning a buffer object ensures that it is not evicted from
226 * a memory region. A pinned buffer object has to be unpinned before
227 * it can be pinned to another region.
228 *
229 * Returns:
230 * 0 on success, or
231 * a negative error code otherwise.
232 */
233int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
234{
235 int i, ret;
236 struct ttm_operation_ctx ctx = { false, false };
237
238 if (gbo->pin_count) {
239 ++gbo->pin_count;
240 return 0;
241 }
242
243 drm_gem_vram_placement(gbo, pl_flag);
244 for (i = 0; i < gbo->placement.num_placement; ++i)
245 gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
246
247 ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
248 if (ret < 0)
249 return ret;
250
251 gbo->pin_count = 1;
252
253 return 0;
254}
255EXPORT_SYMBOL(drm_gem_vram_pin);
256
257/**
258 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
259 * @gbo: the GEM VRAM object
260 *
261 * Returns:
262 * 0 on success, or
263 * a negative error code otherwise.
264 */
265int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
266{
267 int i, ret;
268 struct ttm_operation_ctx ctx = { false, false };
269
270 if (WARN_ON_ONCE(!gbo->pin_count))
271 return 0;
272
273 --gbo->pin_count;
274 if (gbo->pin_count)
275 return 0;
276
277 for (i = 0; i < gbo->placement.num_placement ; ++i)
278 gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
279
280 ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
281 if (ret < 0)
282 return ret;
283
284 return 0;
285}
286EXPORT_SYMBOL(drm_gem_vram_unpin);
287
288/**
289 * drm_gem_vram_push_to_system() - \
290 Unpins a GEM VRAM object and moves it to system memory
291 * @gbo: the GEM VRAM object
292 *
293 * This operation only works if the caller holds the final pin on the
294 * buffer object.
295 *
296 * Returns:
297 * 0 on success, or
298 * a negative error code otherwise.
299 */
300int drm_gem_vram_push_to_system(struct drm_gem_vram_object *gbo)
301{
302 int i, ret;
303 struct ttm_operation_ctx ctx = { false, false };
304
305 if (WARN_ON_ONCE(!gbo->pin_count))
306 return 0;
307
308 --gbo->pin_count;
309 if (gbo->pin_count)
310 return 0;
311
312 if (gbo->kmap.virtual)
313 ttm_bo_kunmap(&gbo->kmap);
314
315 drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
316 for (i = 0; i < gbo->placement.num_placement ; ++i)
317 gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
318
319 ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
320 if (ret)
321 return ret;
322
323 return 0;
324}
325EXPORT_SYMBOL(drm_gem_vram_push_to_system);
326
/**
 * drm_gem_vram_kmap_at() - Maps a GEM VRAM object into kernel address space
 * @gbo:	the GEM VRAM object
 * @map:	establish a mapping if necessary
 * @is_iomem:	returns true if the mapped memory is I/O memory, or false \
	otherwise; can be NULL
 * @kmap:	the mapping's kmap object
 *
 * This function maps the buffer object into the kernel's address space
 * or returns the current mapping. If the parameter map is false, the
 * function only queries the current mapping, but does not establish a
 * new one.
 *
 * NOTE(review): the mapping is not reference-counted; concurrent callers
 * sharing one kmap object must synchronize externally — verify callers.
 *
 * Returns:
 * The buffers virtual address if mapped, or
 * NULL if not mapped, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_kmap_at(struct drm_gem_vram_object *gbo, bool map,
			   bool *is_iomem, struct ttm_bo_kmap_obj *kmap)
{
	int ret;

	/* Reuse an existing mapping; only create one when asked to. */
	if (kmap->virtual || !map)
		goto out;

	/* Map the complete buffer object, starting at page 0. */
	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!is_iomem)
		return kmap->virtual;
	if (!kmap->virtual) {
		/* Not mapped (map == false): report plain NULL. */
		*is_iomem = false;
		return NULL;
	}
	return ttm_kmap_obj_virtual(kmap, is_iomem);
}
EXPORT_SYMBOL(drm_gem_vram_kmap_at);
367
368/**
369 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
370 * @gbo: the GEM VRAM object
371 * @map: establish a mapping if necessary
372 * @is_iomem: returns true if the mapped memory is I/O memory, or false \
373 otherwise; can be NULL
374 *
375 * This function maps the buffer object into the kernel's address space
376 * or returns the current mapping. If the parameter map is false, the
377 * function only queries the current mapping, but does not establish a
378 * new one.
379 *
380 * Returns:
381 * The buffers virtual address if mapped, or
382 * NULL if not mapped, or
383 * an ERR_PTR()-encoded error code otherwise.
384 */
385void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
386 bool *is_iomem)
387{
388 return drm_gem_vram_kmap_at(gbo, map, is_iomem, &gbo->kmap);
389}
390EXPORT_SYMBOL(drm_gem_vram_kmap);
391
392/**
393 * drm_gem_vram_kunmap_at() - Unmaps a GEM VRAM object
394 * @gbo: the GEM VRAM object
395 * @kmap: the mapping's kmap object
396 */
397void drm_gem_vram_kunmap_at(struct drm_gem_vram_object *gbo,
398 struct ttm_bo_kmap_obj *kmap)
399{
400 if (!kmap->virtual)
401 return;
402
403 ttm_bo_kunmap(kmap);
404 kmap->virtual = NULL;
405}
406EXPORT_SYMBOL(drm_gem_vram_kunmap_at);
407
408/**
409 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
410 * @gbo: the GEM VRAM object
411 */
412void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
413{
414 drm_gem_vram_kunmap_at(gbo, &gbo->kmap);
415}
416EXPORT_SYMBOL(drm_gem_vram_kunmap);
6c812bc5 417
fed1eec0
TZ
418/**
419 * drm_gem_vram_fill_create_dumb() - \
420 Helper for implementing &struct drm_driver.dumb_create
421 * @file: the DRM file
422 * @dev: the DRM device
423 * @bdev: the TTM BO device managing the buffer object
424 * @pg_align: the buffer's alignment in multiples of the page size
425 * @interruptible: sleep interruptible if waiting for memory
426 * @args: the arguments as provided to \
427 &struct drm_driver.dumb_create
428 *
429 * This helper function fills &struct drm_mode_create_dumb, which is used
430 * by &struct drm_driver.dumb_create. Implementations of this interface
431 * should forwards their arguments to this helper, plus the driver-specific
432 * parameters.
433 *
434 * Returns:
435 * 0 on success, or
436 * a negative error code otherwise.
437 */
438int drm_gem_vram_fill_create_dumb(struct drm_file *file,
439 struct drm_device *dev,
440 struct ttm_bo_device *bdev,
441 unsigned long pg_align,
442 bool interruptible,
443 struct drm_mode_create_dumb *args)
444{
445 size_t pitch, size;
446 struct drm_gem_vram_object *gbo;
447 int ret;
448 u32 handle;
449
450 pitch = args->width * ((args->bpp + 7) / 8);
451 size = pitch * args->height;
452
453 size = roundup(size, PAGE_SIZE);
454 if (!size)
455 return -EINVAL;
456
457 gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
458 if (IS_ERR(gbo))
459 return PTR_ERR(gbo);
460
461 ret = drm_gem_handle_create(file, &gbo->gem, &handle);
462 if (ret)
463 goto err_drm_gem_object_put_unlocked;
464
465 drm_gem_object_put_unlocked(&gbo->gem);
466
467 args->pitch = pitch;
468 args->size = size;
469 args->handle = handle;
470
471 return 0;
472
473err_drm_gem_object_put_unlocked:
474 drm_gem_object_put_unlocked(&gbo->gem);
475 return ret;
476}
477EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
478
6c812bc5
TZ
479/*
480 * Helpers for struct ttm_bo_driver
481 */
482
483static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
484{
485 return (bo->destroy == ttm_buffer_object_destroy);
486}
487
488/**
489 * drm_gem_vram_bo_driver_evict_flags() - \
490 Implements &struct ttm_bo_driver.evict_flags
491 * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
492 * @pl: TTM placement information.
493 */
494void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
495 struct ttm_placement *pl)
496{
497 struct drm_gem_vram_object *gbo;
498
499 /* TTM may pass BOs that are not GEM VRAM BOs. */
500 if (!drm_is_gem_vram(bo))
501 return;
502
503 gbo = drm_gem_vram_of_bo(bo);
504 drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
505 *pl = gbo->placement;
506}
507EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);
508
509/**
510 * drm_gem_vram_bo_driver_verify_access() - \
511 Implements &struct ttm_bo_driver.verify_access
512 * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
513 * @filp: File pointer.
514 *
515 * Returns:
516 * 0 on success, or
517 * a negative errno code otherwise.
518 */
519int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
520 struct file *filp)
521{
522 struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
523
524 return drm_vma_node_verify_access(&gbo->gem.vma_node,
525 filp->private_data);
526}
527EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);
737000fd 528
5c9dcacf
TZ
/**
 * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
 *
 * Most users of &struct drm_gem_vram_object will also use
 * &struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
 * can be used to connect both.
 */
const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
	.evict_flags = drm_gem_vram_bo_driver_evict_flags,
	.verify_access = drm_gem_vram_bo_driver_verify_access
};
EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
541
737000fd
TZ
542/*
543 * Helpers for struct drm_driver
544 */
545
/**
 * drm_gem_vram_driver_gem_free_object_unlocked() - \
	Implements &struct drm_driver.gem_free_object_unlocked
 * @gem:	GEM object. Refers to &struct drm_gem_vram_object.gem
 */
void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem)
{
	/* Drop the reference; final teardown happens through the BO's
	 * destroy callback, ttm_buffer_object_destroy().
	 */
	drm_gem_vram_put(drm_gem_vram_of_gem(gem));
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_free_object_unlocked);
558
59f5989a
TZ
/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @args:		the arguments as provided to \
			&struct drm_driver.dumb_create
 *
 * This function requires the driver to use &struct drm_device.vram_mm
 * for its instance of VRAM MM. The buffer is created with default page
 * alignment (0) and non-interruptible waits.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
					     false, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
585
737000fd
TZ
586/**
587 * drm_gem_vram_driver_dumb_mmap_offset() - \
588 Implements &struct drm_driver.dumb_mmap_offset
589 * @file: DRM file pointer.
590 * @dev: DRM device.
591 * @handle: GEM handle
592 * @offset: Returns the mapping's memory offset on success
593 *
594 * Returns:
595 * 0 on success, or
596 * a negative errno code otherwise.
597 */
598int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
599 struct drm_device *dev,
600 uint32_t handle, uint64_t *offset)
601{
602 struct drm_gem_object *gem;
603 struct drm_gem_vram_object *gbo;
604
605 gem = drm_gem_object_lookup(file, handle);
606 if (!gem)
607 return -ENOENT;
608
609 gbo = drm_gem_vram_of_gem(gem);
610 *offset = drm_gem_vram_mmap_offset(gbo);
611
612 drm_gem_object_put_unlocked(gem);
613
614 return 0;
615}
616EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
1f460b49
TZ
617
618/*
619 * PRIME helpers for struct drm_driver
620 */
621
622/**
623 * drm_gem_vram_driver_gem_prime_pin() - \
624 Implements &struct drm_driver.gem_prime_pin
625 * @gem: The GEM object to pin
626 *
627 * Returns:
628 * 0 on success, or
629 * a negative errno code otherwise.
630 */
631int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem)
632{
633 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
634
635 return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
636}
637EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_pin);
638
/**
 * drm_gem_vram_driver_gem_prime_unpin() - \
	Implements &struct drm_driver.gem_prime_unpin
 * @gem:	The GEM object to unpin
 */
void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *gem)
{
	drm_gem_vram_unpin(drm_gem_vram_of_gem(gem));
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_unpin);
651
652/**
653 * drm_gem_vram_driver_gem_prime_vmap() - \
654 Implements &struct drm_driver.gem_prime_vmap
655 * @gem: The GEM object to map
656 *
657 * Returns:
658 * The buffers virtual address on success, or
659 * NULL otherwise.
660 */
661void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *gem)
662{
663 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
664 int ret;
665 void *base;
666
667 ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
668 if (ret)
669 return NULL;
670 base = drm_gem_vram_kmap(gbo, true, NULL);
671 if (IS_ERR(base)) {
672 drm_gem_vram_unpin(gbo);
673 return NULL;
674 }
675 return base;
676}
677EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vmap);
678
/**
 * drm_gem_vram_driver_gem_prime_vunmap() - \
	Implements &struct drm_driver.gem_prime_vunmap
 * @gem:	The GEM object to unmap
 * @vaddr:	The mapping's base address
 */
void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *gem,
					  void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Undo drm_gem_vram_driver_gem_prime_vmap(): drop the mapping,
	 * then release the pin it took.
	 */
	drm_gem_vram_kunmap(gbo);
	drm_gem_vram_unpin(gbo);
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vunmap);
694
/**
 * drm_gem_vram_driver_gem_prime_mmap() - \
	Implements &struct drm_driver.gem_prime_mmap
 * @gem:	The GEM object to map
 * @vma:	The VMA describing the mapping
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *gem,
				       struct vm_area_struct *vma)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Copy the start offset from the TTM BO's VMA node into the GEM
	 * object's node — presumably so that drm_gem_prime_mmap(), which
	 * operates on the GEM object, sees the TTM-managed offset.
	 * NOTE(review): this reaches into drm_vma_offset_node internals
	 * (vm_node.start); verify it stays in sync with the DRM VMA
	 * manager's layout.
	 */
	gbo->gem.vma_node.vm_node.start = gbo->bo.vma_node.vm_node.start;
	return drm_gem_prime_mmap(gem, vma);
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_mmap);