Commit | Line | Data |
---|---|---|
85438a8d TZ |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | ||
6b5ce4a1 TZ |
3 | #include <drm/drm_debugfs.h> |
4 | #include <drm/drm_device.h> | |
0fb5f69e | 5 | #include <drm/drm_drv.h> |
6b5ce4a1 | 6 | #include <drm/drm_file.h> |
6542ad89 | 7 | #include <drm/drm_framebuffer.h> |
527f6d91 | 8 | #include <drm/drm_gem_ttm_helper.h> |
85438a8d | 9 | #include <drm/drm_gem_vram_helper.h> |
fed1eec0 | 10 | #include <drm/drm_mode.h> |
6542ad89 | 11 | #include <drm/drm_plane.h> |
1f460b49 | 12 | #include <drm/drm_prime.h> |
6542ad89 | 13 | #include <drm/drm_simple_kms_helper.h> |
85438a8d TZ |
14 | #include <drm/ttm/ttm_page_alloc.h> |
15 | ||
31070a87 TZ |
16 | static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; |
17 | ||
85438a8d TZ |
18 | /** |
19 | * DOC: overview | |
20 | * | |
21 | * This library provides a GEM buffer object that is backed by video RAM | |
22 | * (VRAM). It can be used for framebuffer devices with dedicated memory. | |
6b5ce4a1 TZ |
23 | * |
24 | * The data structure &struct drm_vram_mm and its helpers implement a memory | |
25 | * manager for simple framebuffer devices with dedicated video memory. Buffer | |
26 | * objects are either placed in video RAM or evicted to system memory. The rsp. | |
27 | * buffer object is provided by &struct drm_gem_vram_object. | |
85438a8d TZ |
28 | */ |
29 | ||
30 | /* | |
31 | * Buffer-objects helpers | |
32 | */ | |
33 | ||
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
        /* We got here via ttm_bo_put(), which means that the
         * TTM buffer object in 'bo' has already been cleaned
         * up; only release the GEM object.
         */

        /* A non-zero kmap use count or a still-live kernel mapping at
         * this point indicates an unbalanced kmap/kunmap by a caller. */
        WARN_ON(gbo->kmap_use_count);
        WARN_ON(gbo->kmap.virtual);

        drm_gem_object_release(&gbo->bo.base);
}
46 | ||
/* Releases the embedded GEM object state, then frees the containing
 * structure. Cleanup must run before kfree(). */
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
        drm_gem_vram_cleanup(gbo);
        kfree(gbo);
}
52 | ||
/* TTM destroy callback; installed via ttm_bo_init() and used by
 * drm_is_gem_vram() to identify GEM VRAM buffer objects. */
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
        drm_gem_vram_destroy(drm_gem_vram_of_bo(bo));
}
59 | ||
/* Translates DRM_GEM_VRAM_PL_FLAG_* / TTM_PL_FLAG_* placement flags into
 * the buffer object's TTM placement entries. */
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
                                   unsigned long pl_flag)
{
        unsigned int i;
        unsigned int c = 0; /* number of placement entries filled */
        /* TTM_PL_FLAG_TOPDOWN is carried over into every entry. */
        u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;

        gbo->placement.placement = gbo->placements;
        gbo->placement.busy_placement = gbo->placements;

        if (pl_flag & TTM_PL_FLAG_VRAM)
                gbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM |
                                             invariant_flags;

        if (pl_flag & TTM_PL_FLAG_SYSTEM)
                gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM |
                                             invariant_flags;

        /* Fall back to system memory if no placement was requested. */
        if (!c)
                gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM |
                                             invariant_flags;

        gbo->placement.num_placement = c;
        gbo->placement.num_busy_placement = c;

        /* No page-range restriction on any placement entry. */
        for (i = 0; i < c; ++i) {
                gbo->placements[i].fpfn = 0;
                gbo->placements[i].lpfn = 0;
        }
}
94 | ||
/* Initializes the GEM and TTM state of a pre-allocated VRAM buffer
 * object. On failure the caller still owns (and must free) @gbo; on
 * success, releasing the final TTM reference destroys it via
 * ttm_buffer_object_destroy(). */
static int drm_gem_vram_init(struct drm_device *dev,
                             struct drm_gem_vram_object *gbo,
                             size_t size, unsigned long pg_align)
{
        struct drm_vram_mm *vmm = dev->vram_mm;
        struct ttm_bo_device *bdev;
        int ret;
        size_t acc_size;

        if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
                return -EINVAL;
        bdev = &vmm->bdev;

        gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

        ret = drm_gem_object_init(dev, &gbo->bo.base, size);
        if (ret)
                return ret;

        acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

        gbo->bo.bdev = bdev;
        /* Default placement: VRAM preferred, evictable to system memory. */
        drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
                          &gbo->placement, pg_align, false, acc_size,
                          NULL, NULL, ttm_buffer_object_destroy);
        if (ret)
                goto err_drm_gem_object_release;

        return 0;

err_drm_gem_object_release:
        drm_gem_object_release(&gbo->bo.base);
        return ret;
}
131 | ||
/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:		the DRM device
 * @size:		the buffer size in bytes
 * @pg_align:		the buffer's alignment in multiples of the page size
 *
 * If the driver provides a &struct drm_driver.gem_create_object hook, the
 * buffer structure is allocated through it; otherwise it is kzalloc()'ed.
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
                                                size_t size,
                                                unsigned long pg_align)
{
        struct drm_gem_vram_object *gbo;
        int ret;

        if (dev->driver->gem_create_object) {
                struct drm_gem_object *gem =
                        dev->driver->gem_create_object(dev, size);
                if (!gem)
                        return ERR_PTR(-ENOMEM);
                gbo = drm_gem_vram_of_gem(gem);
        } else {
                gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
                if (!gbo)
                        return ERR_PTR(-ENOMEM);
        }

        ret = drm_gem_vram_init(dev, gbo, size, pg_align);
        if (ret < 0)
                goto err_kfree;

        return gbo;

err_kfree:
        /* NOTE(review): kfree() is used even when the object came from
         * dev->driver->gem_create_object(); this assumes that hook uses a
         * kmalloc-compatible allocation — TODO confirm with drivers. */
        kfree(gbo);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);
172 | ||
/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo: the GEM VRAM object
 *
 * Dropping the final reference destroys the object via
 * ttm_buffer_object_destroy(). See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
        ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
184 | ||
/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo: the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
        return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
200 | ||
/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo: the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
        /* The offset is only stable while the buffer is pinned. */
        if (WARN_ON_ONCE(!gbo->pin_count))
                return (s64)-ENODEV;
        return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
220 | ||
/* Pins the buffer object; caller must hold the BO reservation.
 * A @pl_flag of 0 pins the buffer at its current location. */
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
                                   unsigned long pl_flag)
{
        int i, ret;
        struct ttm_operation_ctx ctx = { false, false };

        /* Already pinned: only take another pin reference. */
        if (gbo->pin_count)
                goto out;

        if (pl_flag)
                drm_gem_vram_placement(gbo, pl_flag);

        /* Mark all placements non-evictable before validating. */
        for (i = 0; i < gbo->placement.num_placement; ++i)
                gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
        if (ret < 0)
                return ret;

out:
        ++gbo->pin_count;

        return 0;
}
85438a8d TZ |
245 | |
/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo:	the GEM VRAM object
 * @pl_flag:	a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Small buffer objects, such as cursor images, can lead to memory
 * fragmentation if they are pinned in the middle of video RAM. This
 * is especially a problem on devices with only a small amount of
 * video RAM. Fragmentation can prevent the primary framebuffer from
 * fitting in, even though there's enough memory overall. The modifier
 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
 * at the high end of the memory region to avoid fragmentation.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
        int ret;

        /* Reserve the BO around the pin operation; interruptible wait. */
        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;
        ret = drm_gem_vram_pin_locked(gbo, pl_flag);
        ttm_bo_unreserve(&gbo->bo);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);
282 | ||
/* Drops one pin reference; caller must hold the BO reservation. */
static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
        int i, ret;
        struct ttm_operation_ctx ctx = { false, false };

        /* Unbalanced unpin: warn and treat as a no-op. */
        if (WARN_ON_ONCE(!gbo->pin_count))
                return 0;

        --gbo->pin_count;
        if (gbo->pin_count)
                return 0;

        /* Last pin reference dropped: make the BO evictable again. */
        for (i = 0; i < gbo->placement.num_placement ; ++i)
                gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
        if (ret < 0)
                return ret;

        return 0;
}
5b24f715 | 304 | |
bc25bb91 TZ |
/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo: the GEM VRAM object
 *
 * Complements drm_gem_vram_pin(); reserves the buffer object around the
 * unpin operation.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
        int ret;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;
        ret = drm_gem_vram_unpin_locked(gbo);
        ttm_bo_unreserve(&gbo->bo);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
326 | ||
/* Maps the buffer into kernel address space, or returns the existing
 * mapping; caller must hold the BO reservation. With @map false, only
 * queries the current mapping. Each non-NULL return takes one reference
 * in kmap_use_count; balance with drm_gem_vram_kunmap_locked(). */
static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
                                      bool map, bool *is_iomem)
{
        int ret;
        struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

        if (gbo->kmap_use_count > 0)
                goto out;

        if (kmap->virtual || !map)
                goto out;

        ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
        if (ret)
                return ERR_PTR(ret);

out:
        if (!kmap->virtual) {
                if (is_iomem)
                        *is_iomem = false;
                return NULL; /* not mapped; don't increment ref */
        }
        ++gbo->kmap_use_count;
        if (is_iomem)
                return ttm_kmap_obj_virtual(kmap, is_iomem);
        return kmap->virtual;
}
354 | ||
85438a8d | 355 | /** |
92172173 | 356 | * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space |
85438a8d TZ |
357 | * @gbo: the GEM VRAM object |
358 | * @map: establish a mapping if necessary | |
359 | * @is_iomem: returns true if the mapped memory is I/O memory, or false \ | |
360 | otherwise; can be NULL | |
85438a8d TZ |
361 | * |
362 | * This function maps the buffer object into the kernel's address space | |
363 | * or returns the current mapping. If the parameter map is false, the | |
364 | * function only queries the current mapping, but does not establish a | |
365 | * new one. | |
366 | * | |
367 | * Returns: | |
368 | * The buffers virtual address if mapped, or | |
369 | * NULL if not mapped, or | |
370 | * an ERR_PTR()-encoded error code otherwise. | |
371 | */ | |
92172173 TZ |
372 | void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, |
373 | bool *is_iomem) | |
85438a8d TZ |
374 | { |
375 | int ret; | |
37a48adf | 376 | void *virtual; |
85438a8d | 377 | |
37a48adf | 378 | ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); |
85438a8d TZ |
379 | if (ret) |
380 | return ERR_PTR(ret); | |
37a48adf TZ |
381 | virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem); |
382 | ttm_bo_unreserve(&gbo->bo); | |
85438a8d | 383 | |
37a48adf | 384 | return virtual; |
85438a8d | 385 | } |
85438a8d TZ |
386 | EXPORT_SYMBOL(drm_gem_vram_kmap); |
387 | ||
/* Drops one kmap reference; caller must hold the BO reservation.
 * The kernel mapping itself is kept until eviction (see below). */
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
{
        if (WARN_ON_ONCE(!gbo->kmap_use_count))
                return;
        if (--gbo->kmap_use_count > 0)
                return;

        /*
         * Permanently mapping and unmapping buffers adds overhead from
         * updating the page tables and creates debugging output. Therefore,
         * we delay the actual unmap operation until the BO gets evicted
         * from memory. See drm_gem_vram_bo_driver_move_notify().
         */
}
37a48adf TZ |
402 | |
/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo: the GEM VRAM object
 *
 * Complements drm_gem_vram_kmap(); drops one kmap reference under the
 * BO reservation.
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
        int ret;

        /* Non-interruptible reserve: unmap must not be skipped on signal. */
        ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
        if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
                return;
        drm_gem_vram_kunmap_locked(gbo);
        ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
6c812bc5 | 418 | |
c8908bde TZ |
/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
 *                       space
 * @gbo:	The GEM VRAM object to map
 *
 * The vmap function pins a GEM VRAM object to its current location, either
 * system or video memory, and maps its buffer into kernel address space.
 * As pinned object cannot be relocated, you should avoid pinning objects
 * permanently. Call drm_gem_vram_vunmap() with the returned address to
 * unmap and unpin the GEM VRAM object.
 *
 * If you have special requirements for the pinning or mapping operations,
 * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
 *
 * Returns:
 * The buffer's virtual address on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
{
        int ret;
        void *base;

        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ERR_PTR(ret);

        /* pl_flag 0: pin at the buffer's current location. */
        ret = drm_gem_vram_pin_locked(gbo, 0);
        if (ret)
                goto err_ttm_bo_unreserve;
        base = drm_gem_vram_kmap_locked(gbo, true, NULL);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
                goto err_drm_gem_vram_unpin_locked;
        }

        ttm_bo_unreserve(&gbo->bo);

        return base;

err_drm_gem_vram_unpin_locked:
        drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
        ttm_bo_unreserve(&gbo->bo);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_vmap);
466 | ||
/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo:	The GEM VRAM object to unmap
 * @vaddr:	The mapping's base address as returned by drm_gem_vram_vmap()
 *
 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
 * the documentation for drm_gem_vram_vmap() for more information.
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
{
        int ret;

        /* Non-interruptible reserve: the unmap/unpin must not be skipped. */
        ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
        if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
                return;

        drm_gem_vram_kunmap_locked(gbo);
        drm_gem_vram_unpin_locked(gbo);

        ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
489 | ||
fed1eec0 TZ |
/**
 * drm_gem_vram_fill_create_dumb() - \
	Helper for implementing &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @pitch_align:	the scanline's alignment in powers of 2
 * @args:		the arguments as provided to \
			&struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forwards their arguments to this helper, plus the driver-specific
 * parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
                                  struct drm_device *dev,
                                  unsigned long pg_align,
                                  unsigned long pitch_align,
                                  struct drm_mode_create_dumb *args)
{
        size_t pitch, size;
        struct drm_gem_vram_object *gbo;
        int ret;
        u32 handle;

        /* Scanline pitch in bytes, optionally aligned to a power of 2. */
        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        if (pitch_align) {
                if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
                        return -EINVAL;
                pitch = ALIGN(pitch, pitch_align);
        }
        /* NOTE(review): pitch * height is not checked for size_t
         * overflow on untrusted width/height/bpp — TODO confirm the
         * ioctl layer bounds these values. */
        size = pitch * args->height;

        size = roundup(size, PAGE_SIZE);
        if (!size)
                return -EINVAL;

        gbo = drm_gem_vram_create(dev, size, pg_align);
        if (IS_ERR(gbo))
                return PTR_ERR(gbo);

        ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
        if (ret)
                goto err_drm_gem_object_put_unlocked;

        /* The handle now holds the reference; drop the local one. */
        drm_gem_object_put_unlocked(&gbo->bo.base);

        args->pitch = pitch;
        args->size = size;
        args->handle = handle;

        return 0;

err_drm_gem_object_put_unlocked:
        drm_gem_object_put_unlocked(&gbo->bo.base);
        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
553 | ||
6c812bc5 TZ |
554 | /* |
555 | * Helpers for struct ttm_bo_driver | |
556 | */ | |
557 | ||
558 | static bool drm_is_gem_vram(struct ttm_buffer_object *bo) | |
559 | { | |
560 | return (bo->destroy == ttm_buffer_object_destroy); | |
561 | } | |
562 | ||
b0e40e08 TZ |
/* Returns the eviction placement for a GEM VRAM BO: evicted buffers
 * are moved to system memory. Updates gbo's placement as a side effect. */
static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
                                               struct ttm_placement *pl)
{
        drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
        *pl = gbo->placement;
}
6c812bc5 | 569 | |
b0e40e08 TZ |
/* Called when TTM moves the BO; releases the deferred kernel mapping
 * (see drm_gem_vram_kunmap_locked()) since it becomes stale. */
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
                                               bool evict,
                                               struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

        /* A live kmap user during a move indicates a caller bug. */
        if (WARN_ON_ONCE(gbo->kmap_use_count))
                return;

        if (!kmap->virtual)
                return;
        ttm_bo_kunmap(kmap);
        kmap->virtual = NULL;
}
5c9dcacf | 584 | |
737000fd | 585 | /* |
0ccf52ba | 586 | * Helpers for struct drm_gem_object_funcs |
737000fd TZ |
587 | */ |
588 | ||
/**
 * drm_gem_vram_object_free() - \
	Implements &struct drm_gem_object_funcs.free
 * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
 *
 * Drops the TTM reference that backs the GEM object.
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
        drm_gem_vram_put(drm_gem_vram_of_gem(gem));
}
0ccf52ba TZ |
600 | |
601 | /* | |
602 | * Helpers for dump buffers | |
603 | */ | |
737000fd | 604 | |
59f5989a TZ |
/**
 * drm_gem_vram_driver_create_dumb() - \
	Implements &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @args:		the arguments as provided to \
			&struct drm_driver.dumb_create
 *
 * This function requires the driver to use @drm_device.vram_mm for its
 * instance of VRAM MM. It forwards to drm_gem_vram_fill_create_dumb()
 * with default page and pitch alignment.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
                                    struct drm_device *dev,
                                    struct drm_mode_create_dumb *args)
{
        if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
                return -EINVAL;

        return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
630 | ||
737000fd TZ |
/**
 * drm_gem_vram_driver_dumb_mmap_offset() - \
	Implements &struct drm_driver.dumb_mmap_offset
 * @file:	DRM file pointer.
 * @dev:	DRM device.
 * @handle:	GEM handle
 * @offset:	Returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
                                         struct drm_device *dev,
                                         uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *gem;
        struct drm_gem_vram_object *gbo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem)
                return -ENOENT;

        gbo = drm_gem_vram_of_gem(gem);
        *offset = drm_gem_vram_mmap_offset(gbo);

        /* Drop the lookup reference; the caller keeps only the offset. */
        drm_gem_object_put_unlocked(gem);

        return 0;
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
1f460b49 | 662 | |
6542ad89 TZ |
663 | /* |
664 | * Helpers for struct drm_plane_helper_funcs | |
665 | */ | |
666 | ||
/**
 * drm_gem_vram_plane_helper_prepare_fb() - \
 *	Implements &struct drm_plane_helper_funcs.prepare_fb
 * @plane:	a DRM plane
 * @new_state:	the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
 *
 * Returns:
 *	0 on success, or
 *	a negative errno code otherwise.
 */
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
                                     struct drm_plane_state *new_state)
{
        size_t i;
        struct drm_gem_vram_object *gbo;
        int ret;

        if (!new_state->fb)
                return 0;

        for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
                if (!new_state->fb->obj[i])
                        continue;
                gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
                ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
                if (ret)
                        goto err_drm_gem_vram_unpin;
        }

        return 0;

err_drm_gem_vram_unpin:
        /* Unwind: unpin all objects pinned before the failure. */
        while (i) {
                --i;
                gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
                drm_gem_vram_unpin(gbo);
        }
        return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
712 | ||
713 | /** | |
714 | * drm_gem_vram_plane_helper_cleanup_fb() - \ | |
715 | * Implements &struct drm_plane_helper_funcs.cleanup_fb | |
716 | * @plane: a DRM plane | |
717 | * @old_state: the plane's old state | |
718 | * | |
719 | * During plane updates, this function unpins the GEM VRAM | |
720 | * objects of the plane's old framebuffer from VRAM. Complements | |
721 | * drm_gem_vram_plane_helper_prepare_fb(). | |
722 | */ | |
723 | void | |
724 | drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, | |
725 | struct drm_plane_state *old_state) | |
726 | { | |
727 | size_t i; | |
728 | struct drm_gem_vram_object *gbo; | |
729 | ||
730 | if (!old_state->fb) | |
731 | return; | |
732 | ||
733 | for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) { | |
734 | if (!old_state->fb->obj[i]) | |
735 | continue; | |
736 | gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]); | |
737 | drm_gem_vram_unpin(gbo); | |
738 | } | |
739 | } | |
740 | EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); | |
741 | ||
742 | /* | |
743 | * Helpers for struct drm_simple_display_pipe_funcs | |
744 | */ | |
745 | ||
/**
 * drm_gem_vram_simple_display_pipe_prepare_fb() - \
 *	Implements &struct drm_simple_display_pipe_funcs.prepare_fb
 * @pipe:	a simple display pipe
 * @new_state:	the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
 *
 * Returns:
 *	0 on success, or
 *	a negative errno code otherwise.
 */
int drm_gem_vram_simple_display_pipe_prepare_fb(
        struct drm_simple_display_pipe *pipe,
        struct drm_plane_state *new_state)
{
        /* Thin wrapper around the plane-helper implementation. */
        return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
767 | ||
/**
 * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
 *	Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
 * @pipe:	a simple display pipe
 * @old_state:	the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_simple_display_pipe_prepare_fb().
 */
void drm_gem_vram_simple_display_pipe_cleanup_fb(
        struct drm_simple_display_pipe *pipe,
        struct drm_plane_state *old_state)
{
        /* Thin wrapper around the plane-helper implementation. */
        drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
785 | ||
1f460b49 | 786 | /* |
0ccf52ba | 787 | * PRIME helpers |
1f460b49 TZ |
788 | */ |
789 | ||
/**
 * drm_gem_vram_object_pin() - \
	Implements &struct drm_gem_object_funcs.pin
 * @gem:	The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        /* Fbdev console emulation is the use case of these PRIME
         * helpers. This may involve updating a hardware buffer from
         * a shadow FB. We pin the buffer to it's current location
         * (either video RAM or system memory) to prevent it from
         * being relocated during the update operation. If you require
         * the buffer to be pinned to VRAM, implement a callback that
         * sets the flags accordingly.
         */
        return drm_gem_vram_pin(gbo, 0);
}
1f460b49 TZ |
813 | |
/**
 * drm_gem_vram_object_unpin() - \
	Implements &struct drm_gem_object_funcs.unpin
 * @gem:	The GEM object to unpin
 *
 * Complements drm_gem_vram_object_pin().
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
        drm_gem_vram_unpin(drm_gem_vram_of_gem(gem));
}
1f460b49 TZ |
825 | |
826 | /** | |
0ccf52ba TZ |
827 | * drm_gem_vram_object_vmap() - \ |
828 | Implements &struct drm_gem_object_funcs.vmap | |
1f460b49 TZ |
829 | * @gem: The GEM object to map |
830 | * | |
831 | * Returns: | |
832 | * The buffers virtual address on success, or | |
833 | * NULL otherwise. | |
834 | */ | |
0ccf52ba | 835 | static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem) |
1f460b49 TZ |
836 | { |
837 | struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); | |
1f460b49 TZ |
838 | void *base; |
839 | ||
c8908bde TZ |
840 | base = drm_gem_vram_vmap(gbo); |
841 | if (IS_ERR(base)) | |
842 | return NULL; | |
1f460b49 TZ |
843 | return base; |
844 | } | |
1f460b49 TZ |
845 | |
/**
 * drm_gem_vram_object_vunmap() - \
	Implements &struct drm_gem_object_funcs.vunmap
 * @gem:	The GEM object to unmap
 * @vaddr:	The mapping's base address
 *
 * Complements drm_gem_vram_object_vmap().
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
                                       void *vaddr)
{
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

        drm_gem_vram_vunmap(gbo, vaddr);
}
31070a87 TZ |
859 | |
860 | /* | |
861 | * GEM object funcs | |
862 | */ | |
863 | ||
/* GEM object operations for VRAM-backed objects; mmap and print_info
 * are provided by the generic GEM TTM helpers. */
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
        .free = drm_gem_vram_object_free,
        .pin = drm_gem_vram_object_pin,
        .unpin = drm_gem_vram_object_unpin,
        .vmap = drm_gem_vram_object_vmap,
        .vunmap = drm_gem_vram_object_vunmap,
        .mmap = drm_gem_ttm_mmap,
        .print_info = drm_gem_ttm_print_info,
};
6b5ce4a1 TZ |
873 | |
874 | /* | |
875 | * VRAM memory manager | |
876 | */ | |
877 | ||
878 | /* | |
879 | * TTM TT | |
880 | */ | |
881 | ||
/* Destroys a TTM TT object; invoked via backend_func.destroy. */
static void backend_func_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);	/* release TTM-internal state first */
	kfree(tt);		/* frees the allocation from bo_driver_ttm_tt_create() */
}
887 | ||
/* Backend callbacks for the TT objects created in bo_driver_ttm_tt_create(). */
static struct ttm_backend_func backend_func = {
	.destroy = backend_func_destroy
};
891 | ||
892 | /* | |
893 | * TTM BO device | |
894 | */ | |
895 | ||
896 | static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, | |
897 | uint32_t page_flags) | |
898 | { | |
899 | struct ttm_tt *tt; | |
900 | int ret; | |
901 | ||
902 | tt = kzalloc(sizeof(*tt), GFP_KERNEL); | |
903 | if (!tt) | |
904 | return NULL; | |
905 | ||
906 | tt->func = &backend_func; | |
907 | ||
908 | ret = ttm_tt_init(tt, bo, page_flags); | |
909 | if (ret < 0) | |
910 | goto err_ttm_tt_init; | |
911 | ||
912 | return tt; | |
913 | ||
914 | err_ttm_tt_init: | |
915 | kfree(tt); | |
916 | return NULL; | |
917 | } | |
918 | ||
919 | static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |
920 | struct ttm_mem_type_manager *man) | |
921 | { | |
922 | switch (type) { | |
923 | case TTM_PL_SYSTEM: | |
924 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | |
925 | man->available_caching = TTM_PL_MASK_CACHING; | |
926 | man->default_caching = TTM_PL_FLAG_CACHED; | |
927 | break; | |
928 | case TTM_PL_VRAM: | |
929 | man->func = &ttm_bo_manager_func; | |
930 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | |
931 | TTM_MEMTYPE_FLAG_MAPPABLE; | |
932 | man->available_caching = TTM_PL_FLAG_UNCACHED | | |
933 | TTM_PL_FLAG_WC; | |
934 | man->default_caching = TTM_PL_FLAG_WC; | |
935 | break; | |
936 | default: | |
937 | return -EINVAL; | |
938 | } | |
939 | return 0; | |
940 | } | |
941 | ||
/* Fills in the eviction placement for a GEM VRAM BO. */
static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	drm_gem_vram_bo_driver_evict_flags(drm_gem_vram_of_bo(bo), placement);
}
955 | ||
6b5ce4a1 TZ |
956 | static void bo_driver_move_notify(struct ttm_buffer_object *bo, |
957 | bool evict, | |
958 | struct ttm_mem_reg *new_mem) | |
959 | { | |
b0e40e08 | 960 | struct drm_gem_vram_object *gbo; |
6b5ce4a1 | 961 | |
b0e40e08 TZ |
962 | /* TTM may pass BOs that are not GEM VRAM BOs. */ |
963 | if (!drm_is_gem_vram(bo)) | |
6b5ce4a1 | 964 | return; |
b0e40e08 TZ |
965 | |
966 | gbo = drm_gem_vram_of_bo(bo); | |
967 | ||
968 | drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); | |
6b5ce4a1 TZ |
969 | } |
970 | ||
971 | static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, | |
972 | struct ttm_mem_reg *mem) | |
973 | { | |
974 | struct ttm_mem_type_manager *man = bdev->man + mem->mem_type; | |
975 | struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev); | |
976 | ||
977 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | |
978 | return -EINVAL; | |
979 | ||
980 | mem->bus.addr = NULL; | |
981 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | |
982 | ||
983 | switch (mem->mem_type) { | |
984 | case TTM_PL_SYSTEM: /* nothing to do */ | |
985 | mem->bus.offset = 0; | |
986 | mem->bus.base = 0; | |
987 | mem->bus.is_iomem = false; | |
988 | break; | |
989 | case TTM_PL_VRAM: | |
990 | mem->bus.offset = mem->start << PAGE_SHIFT; | |
991 | mem->bus.base = vmm->vram_base; | |
992 | mem->bus.is_iomem = true; | |
993 | break; | |
994 | default: | |
995 | return -EINVAL; | |
996 | } | |
997 | ||
998 | return 0; | |
999 | } | |
1000 | ||
/* Nothing to release; bo_driver_io_mem_reserve() reserves no resources. */
static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{ }
1004 | ||
/*
 * TTM device-driver callbacks for the VRAM memory manager. TT pages
 * come from the generic TTM page pool; eviction and move handling is
 * forwarded to the GEM-VRAM helpers defined above.
 */
static struct ttm_bo_driver bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bo_driver_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.move_notify = bo_driver_move_notify,
	.io_mem_reserve = bo_driver_io_mem_reserve,
	.io_mem_free = bo_driver_io_mem_free,
};
1016 | ||
1017 | /* | |
1018 | * struct drm_vram_mm | |
1019 | */ | |
1020 | ||
#if defined(CONFIG_DEBUG_FS)
/* Dumps the VRAM range manager's allocation state to a debugfs file. */
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
	struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
	struct drm_printer p = drm_seq_file_printer(m);

	/* The global LRU lock guards the drm_mm against concurrent updates. */
	spin_lock(&ttm_bo_glob.lru_lock);
	drm_mm_print(mm, &p);
	spin_unlock(&ttm_bo_glob.lru_lock);
	return 0;
}

/* Table of debugfs files; registered by drm_vram_mm_debugfs_init(). */
static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
	{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
#endif
1039 | ||
/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 *
 * @minor: drm minor device.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
	return drm_debugfs_create_files(drm_vram_mm_debugfs_list,
					ARRAY_SIZE(drm_vram_mm_debugfs_list),
					minor->debugfs_root, minor);
#else
	/* Without debugfs there is nothing to register. */
	return 0;
#endif
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
1061 | ||
c30b225d TZ |
1062 | static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, |
1063 | uint64_t vram_base, size_t vram_size) | |
6b5ce4a1 TZ |
1064 | { |
1065 | int ret; | |
1066 | ||
1067 | vmm->vram_base = vram_base; | |
1068 | vmm->vram_size = vram_size; | |
6b5ce4a1 TZ |
1069 | |
1070 | ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, | |
1071 | dev->anon_inode->i_mapping, | |
1072 | dev->vma_offset_manager, | |
1073 | true); | |
1074 | if (ret) | |
1075 | return ret; | |
1076 | ||
1077 | ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT); | |
1078 | if (ret) | |
1079 | return ret; | |
1080 | ||
1081 | return 0; | |
1082 | } | |
6b5ce4a1 | 1083 | |
c30b225d | 1084 | static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) |
6b5ce4a1 TZ |
1085 | { |
1086 | ttm_bo_device_release(&vmm->bdev); | |
1087 | } | |
6b5ce4a1 | 1088 | |
6b5ce4a1 TZ |
1089 | /* |
1090 | * Helpers for integration with struct drm_device | |
1091 | */ | |
1092 | ||
1093 | /** | |
1094 | * drm_vram_helper_alloc_mm - Allocates a device's instance of \ | |
1095 | &struct drm_vram_mm | |
1096 | * @dev: the DRM device | |
1097 | * @vram_base: the base address of the video memory | |
1098 | * @vram_size: the size of the video memory in bytes | |
6b5ce4a1 TZ |
1099 | * |
1100 | * Returns: | |
1101 | * The new instance of &struct drm_vram_mm on success, or | |
1102 | * an ERR_PTR()-encoded errno code otherwise. | |
1103 | */ | |
1104 | struct drm_vram_mm *drm_vram_helper_alloc_mm( | |
b0e40e08 | 1105 | struct drm_device *dev, uint64_t vram_base, size_t vram_size) |
6b5ce4a1 TZ |
1106 | { |
1107 | int ret; | |
1108 | ||
1109 | if (WARN_ON(dev->vram_mm)) | |
1110 | return dev->vram_mm; | |
1111 | ||
1112 | dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL); | |
1113 | if (!dev->vram_mm) | |
1114 | return ERR_PTR(-ENOMEM); | |
1115 | ||
b0e40e08 | 1116 | ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size); |
6b5ce4a1 TZ |
1117 | if (ret) |
1118 | goto err_kfree; | |
1119 | ||
1120 | return dev->vram_mm; | |
1121 | ||
1122 | err_kfree: | |
1123 | kfree(dev->vram_mm); | |
1124 | dev->vram_mm = NULL; | |
1125 | return ERR_PTR(ret); | |
1126 | } | |
1127 | EXPORT_SYMBOL(drm_vram_helper_alloc_mm); | |
1128 | ||
1129 | /** | |
1130 | * drm_vram_helper_release_mm - Releases a device's instance of \ | |
1131 | &struct drm_vram_mm | |
1132 | * @dev: the DRM device | |
1133 | */ | |
1134 | void drm_vram_helper_release_mm(struct drm_device *dev) | |
1135 | { | |
1136 | if (!dev->vram_mm) | |
1137 | return; | |
1138 | ||
1139 | drm_vram_mm_cleanup(dev->vram_mm); | |
1140 | kfree(dev->vram_mm); | |
1141 | dev->vram_mm = NULL; | |
1142 | } | |
1143 | EXPORT_SYMBOL(drm_vram_helper_release_mm); |