Commit | Line | Data |
---|---|---|
8038d2a9 | 1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
d991ef03 JB |
2 | /************************************************************************** |
3 | * | |
e9431ea5 | 4 | * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA |
d991ef03 JB |
5 | * All Rights Reserved. |
6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | |
8 | * copy of this software and associated documentation files (the | |
9 | * "Software"), to deal in the Software without restriction, including | |
10 | * without limitation the rights to use, copy, modify, merge, publish, | |
11 | * distribute, sub license, and/or sell copies of the Software, and to | |
12 | * permit persons to whom the Software is furnished to do so, subject to | |
13 | * the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the | |
16 | * next paragraph) shall be included in all copies or substantial portions | |
17 | * of the Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
26 | * | |
27 | **************************************************************************/ | |
28 | ||
760285e7 | 29 | #include <drm/ttm/ttm_placement.h> |
d991ef03 | 30 | |
d991ef03 | 31 | #include "vmwgfx_drv.h" |
0b8762e9 | 32 | #include "ttm_object.h" |
e9431ea5 TH |
33 | |
34 | ||
e9431ea5 TH |
35 | /** |
36 | * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct | |
37 | * vmw_buffer_object. | |
38 | * | |
39 | * @bo: Pointer to the TTM buffer object. | |
40 | * Return: Pointer to the struct vmw_buffer_object embedding the | |
41 | * TTM buffer object. | |
42 | */ | |
43 | static struct vmw_buffer_object * | |
44 | vmw_buffer_object(struct ttm_buffer_object *bo) | |
45 | { | |
46 | return container_of(bo, struct vmw_buffer_object, base); | |
47 | } | |
48 | ||
298799a2 ZR |
49 | /** |
50 | * bo_is_vmw - check if the buffer object is a &vmw_buffer_object | |
51 | * @bo: ttm buffer object to be checked | |
52 | * | |
53 | * Uses destroy function associated with the object to determine if this is | |
54 | * a &vmw_buffer_object. | |
55 | * | |
56 | * Returns: | |
57 | * true if the object is of &vmw_buffer_object type, false if not. | |
58 | */ | |
59 | static bool bo_is_vmw(struct ttm_buffer_object *bo) | |
60 | { | |
61 | return bo->destroy == &vmw_bo_bo_free || | |
62 | bo->destroy == &vmw_gem_destroy; | |
63 | } | |
e9431ea5 | 64 | |
/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	/* Unpin the query bo first so it can't block the validation below. */
	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	/*
	 * If the buffer is already pinned we may not move it; just check
	 * that its current resource satisfies the requested placement.
	 */
	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	/* Pin only after a successful validate, while still reserved. */
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
103 | ||
e9431ea5 | 104 | |
/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	/* Unpin the query bo first so it can't block the validation below. */
	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	/*
	 * An already-pinned buffer may not be moved; only verify that its
	 * current resource is compatible with vram-or-gmr placement.
	 */
	if (buf->base.pin_count > 0) {
		ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
			? 0 : -EINVAL;
		goto out_unreserve;
	}

	/* First try the combined vram/gmr placement ... */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	/* ... and fall back to vram-only if that failed. */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
151 | ||
e9431ea5 | 152 | |
d991ef03 | 153 | /** |
f1d34bfd | 154 | * vmw_bo_pin_in_vram - Move a buffer to vram. |
d991ef03 | 155 | * |
459d0fa7 TH |
156 | * This function takes the reservation_sem in write mode. |
157 | * Flushes and unpins the query bo to avoid failures. | |
d991ef03 JB |
158 | * |
159 | * @dev_priv: Driver private. | |
160 | * @buf: DMA buffer to move. | |
d991ef03 | 161 | * @interruptible: Use interruptible wait. |
e9431ea5 TH |
162 | * Return: Zero on success, Negative error code on failure. In particular |
163 | * -ERESTARTSYS if interrupted by a signal | |
d991ef03 | 164 | */ |
f1d34bfd TH |
165 | int vmw_bo_pin_in_vram(struct vmw_private *dev_priv, |
166 | struct vmw_buffer_object *buf, | |
167 | bool interruptible) | |
d991ef03 | 168 | { |
f1d34bfd TH |
169 | return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement, |
170 | interruptible); | |
d991ef03 JB |
171 | } |
172 | ||
e9431ea5 | 173 | |
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	/*
	 * Build a single-slot placement derived from the vram placement,
	 * but limited to [0, size) so the buffer can only fit at offset 0.
	 */
	place = vmw_vram_placement.placement[0];
	place.lpfn = PFN_UP(bo->resource->size);
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	/* Unpin the query bo first so it can't block the validation below. */
	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->base.pin_count == 0) {
		/* Eviction must not be interruptible once we've decided. */
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	/* Pinned buffers may not move; only check placement compatibility. */
	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, &placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}
237 | ||
e9431ea5 | 238 | |
d991ef03 | 239 | /** |
f1d34bfd | 240 | * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer. |
d991ef03 | 241 | * |
459d0fa7 | 242 | * This function takes the reservation_sem in write mode. |
d991ef03 JB |
243 | * |
244 | * @dev_priv: Driver private. | |
245 | * @buf: DMA buffer to unpin. | |
246 | * @interruptible: Use interruptible wait. | |
e9431ea5 TH |
247 | * Return: Zero on success, Negative error code on failure. In particular |
248 | * -ERESTARTSYS if interrupted by a signal | |
d991ef03 | 249 | */ |
f1d34bfd TH |
250 | int vmw_bo_unpin(struct vmw_private *dev_priv, |
251 | struct vmw_buffer_object *buf, | |
252 | bool interruptible) | |
d991ef03 | 253 | { |
459d0fa7 TH |
254 | struct ttm_buffer_object *bo = &buf->base; |
255 | int ret; | |
d991ef03 | 256 | |
dfd5e50e | 257 | ret = ttm_bo_reserve(bo, interruptible, false, NULL); |
459d0fa7 TH |
258 | if (unlikely(ret != 0)) |
259 | goto err; | |
260 | ||
261 | vmw_bo_pin_reserved(buf, false); | |
262 | ||
263 | ttm_bo_unreserve(bo); | |
264 | ||
265 | err: | |
459d0fa7 TH |
266 | return ret; |
267 | } | |
b37a6b9a | 268 | |
d991ef03 | 269 | /** |
b37a6b9a TH |
270 | * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement |
271 | * of a buffer. | |
d991ef03 | 272 | * |
b37a6b9a TH |
273 | * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved. |
274 | * @ptr: SVGAGuestPtr returning the result. | |
d991ef03 | 275 | */ |
b37a6b9a TH |
276 | void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, |
277 | SVGAGuestPtr *ptr) | |
d991ef03 | 278 | { |
d3116756 | 279 | if (bo->resource->mem_type == TTM_PL_VRAM) { |
d991ef03 | 280 | ptr->gmrId = SVGA_GMR_FRAMEBUFFER; |
d3116756 | 281 | ptr->offset = bo->resource->start << PAGE_SHIFT; |
d991ef03 | 282 | } else { |
d3116756 | 283 | ptr->gmrId = bo->resource->start; |
d991ef03 JB |
284 | ptr->offset = 0; |
285 | } | |
286 | } | |
e2fa3a76 TH |
287 | |
288 | ||
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	/* The caller must hold the reservation. */
	dma_resv_assert_held(bo->base.resv);

	/* Nothing to do if the pin state already matches the request. */
	if (pin == !!bo->pin_count)
		return;

	/*
	 * Build a placement that matches the buffer's current location so
	 * the validate below cannot move it.
	 */
	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	/* Validating in place must neither fail nor change the memory type. */
	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}
bf833fd3 | 328 | |
e9431ea5 TH |
329 | /** |
330 | * vmw_bo_map_and_cache - Map a buffer object and cache the map | |
bf833fd3 TH |
331 | * |
332 | * @vbo: The buffer object to map | |
333 | * Return: A kernel virtual address or NULL if mapping failed. | |
334 | * | |
335 | * This function maps a buffer object into the kernel address space, or | |
336 | * returns the virtual kernel address of an already existing map. The virtual | |
337 | * address remains valid as long as the buffer object is pinned or reserved. | |
338 | * The cached map is torn down on either | |
339 | * 1) Buffer object move | |
340 | * 2) Buffer object swapout | |
341 | * 3) Buffer object destruction | |
342 | * | |
343 | */ | |
e9431ea5 | 344 | void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo) |
bf833fd3 TH |
345 | { |
346 | struct ttm_buffer_object *bo = &vbo->base; | |
347 | bool not_used; | |
348 | void *virtual; | |
349 | int ret; | |
350 | ||
351 | virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used); | |
352 | if (virtual) | |
353 | return virtual; | |
354 | ||
e3c92eb4 | 355 | ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map); |
bf833fd3 TH |
356 | if (ret) |
357 | DRM_ERROR("Buffer object map failed: %d.\n", ret); | |
358 | ||
359 | return ttm_kmap_obj_virtual(&vbo->map, ¬_used); | |
360 | } | |
e9431ea5 TH |
361 | |
362 | ||
363 | /** | |
364 | * vmw_bo_unmap - Tear down a cached buffer object map. | |
365 | * | |
366 | * @vbo: The buffer object whose map we are tearing down. | |
367 | * | |
368 | * This function tears down a cached map set up using | |
369 | * vmw_buffer_object_map_and_cache(). | |
370 | */ | |
371 | void vmw_bo_unmap(struct vmw_buffer_object *vbo) | |
372 | { | |
373 | if (vbo->map.bo == NULL) | |
374 | return; | |
375 | ||
376 | ttm_bo_kunmap(&vbo->map); | |
377 | } | |
378 | ||
379 | ||
/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	/* A destroyed bo must have no dirty tracker and no bound resources. */
	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	/* Tear down any cached kernel map before freeing the memory. */
	vmw_bo_unmap(vmw_bo);
	drm_gem_object_release(&bo->base);
	kfree(vmw_bo);
}
395 | ||
/*
 * Default destructor: used for plain kernel bos allocated with kzalloc()
 * in vmw_bo_create_kernel(); simply frees the embedding allocation.
 */
static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
401 | ||
/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 *
 * Creates and pin a simple BO for in kernel use.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	/* Kernel-internal bos never wait interruptibly. */
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	size = ALIGN(size, PAGE_SIZE);

	/* GEM init must precede TTM init; the GEM object backs bo->base. */
	drm_gem_private_object_init(vdev, &bo->base, size);

	/*
	 * NOTE(review): error_free kfree()s bo directly — confirm that
	 * ttm_bo_init_reserved() does not invoke the destructor itself on
	 * failure, which would make this a double free.
	 */
	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
				   placement, 0, &ctx, NULL, NULL,
				   vmw_bo_default_destroy);
	if (unlikely(ret))
		goto error_free;

	/* Pin while still reserved from init, then release the reservation. */
	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_free:
	kfree(bo);
	return ret;
}
e9431ea5 | 448 | |
8afa13a0 ZR |
449 | int vmw_bo_create(struct vmw_private *vmw, |
450 | size_t size, struct ttm_placement *placement, | |
451 | bool interruptible, bool pin, | |
452 | void (*bo_free)(struct ttm_buffer_object *bo), | |
453 | struct vmw_buffer_object **p_bo) | |
454 | { | |
455 | int ret; | |
456 | ||
35079323 CK |
457 | BUG_ON(!bo_free); |
458 | ||
8afa13a0 ZR |
459 | *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL); |
460 | if (unlikely(!*p_bo)) { | |
461 | DRM_ERROR("Failed to allocate a buffer.\n"); | |
462 | return -ENOMEM; | |
463 | } | |
464 | ||
465 | ret = vmw_bo_init(vmw, *p_bo, size, | |
466 | placement, interruptible, pin, | |
467 | bo_free); | |
468 | if (unlikely(ret != 0)) | |
469 | goto out_error; | |
470 | ||
471 | return ret; | |
472 | out_error: | |
473 | kfree(*p_bo); | |
474 | *p_bo = NULL; | |
475 | return ret; | |
476 | } | |
477 | ||
/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	/* A destructor is mandatory; warn once if a caller omits it. */
	WARN_ON_ONCE(!bo_free);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	/* Priority 3 must exist; guard against TTM shrinking its range. */
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	/* No resources are bound to a freshly initialized bo. */
	vmw_bo->res_tree = RB_ROOT;

	size = ALIGN(size, PAGE_SIZE);
	/* GEM init must precede TTM init; the GEM object backs base.base. */
	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
				   placement, 0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret)) {
		return ret;
	}

	/* Pin while still reserved from init, then drop the reservation. */
	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);

	return 0;
}
527 | ||
e9431ea5 | 528 | /** |
8afa13a0 | 529 | * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu |
e9431ea5 TH |
530 | * access, idling previous GPU operations on the buffer and optionally |
531 | * blocking it for further command submissions. | |
532 | * | |
8afa13a0 | 533 | * @vmw_bo: Pointer to the buffer object being grabbed for CPU access |
e9431ea5 TH |
534 | * @flags: Flags indicating how the grab should be performed. |
535 | * Return: Zero on success, Negative error code on error. In particular, | |
536 | * -EBUSY will be returned if a dontblock operation is requested and the | |
537 | * buffer object is busy, and -ERESTARTSYS will be returned if a wait is | |
538 | * interrupted by a signal. | |
539 | * | |
540 | * A blocking grab will be automatically released when @tfile is closed. | |
541 | */ | |
8afa13a0 | 542 | static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo, |
e9431ea5 TH |
543 | uint32_t flags) |
544 | { | |
7fb03cc3 | 545 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); |
8afa13a0 | 546 | struct ttm_buffer_object *bo = &vmw_bo->base; |
e9431ea5 TH |
547 | int ret; |
548 | ||
549 | if (flags & drm_vmw_synccpu_allow_cs) { | |
e9431ea5 TH |
550 | long lret; |
551 | ||
7bc80a54 CK |
552 | lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, |
553 | true, nonblock ? 0 : | |
d3fae3b3 | 554 | MAX_SCHEDULE_TIMEOUT); |
e9431ea5 TH |
555 | if (!lret) |
556 | return -EBUSY; | |
557 | else if (lret < 0) | |
558 | return lret; | |
559 | return 0; | |
560 | } | |
561 | ||
7fb03cc3 CK |
562 | ret = ttm_bo_reserve(bo, true, nonblock, NULL); |
563 | if (unlikely(ret != 0)) | |
564 | return ret; | |
565 | ||
566 | ret = ttm_bo_wait(bo, true, nonblock); | |
567 | if (likely(ret == 0)) | |
8afa13a0 | 568 | atomic_inc(&vmw_bo->cpu_writers); |
7fb03cc3 CK |
569 | |
570 | ttm_bo_unreserve(bo); | |
e9431ea5 TH |
571 | if (unlikely(ret != 0)) |
572 | return ret; | |
573 | ||
e9431ea5 TH |
574 | return ret; |
575 | } | |
576 | ||
577 | /** | |
578 | * vmw_user_bo_synccpu_release - Release a previous grab for CPU access, | |
579 | * and unblock command submission on the buffer if blocked. | |
580 | * | |
8afa13a0 | 581 | * @filp: Identifying the caller. |
e9431ea5 | 582 | * @handle: Handle identifying the buffer object. |
e9431ea5 TH |
583 | * @flags: Flags indicating the type of release. |
584 | */ | |
8afa13a0 ZR |
585 | static int vmw_user_bo_synccpu_release(struct drm_file *filp, |
586 | uint32_t handle, | |
587 | uint32_t flags) | |
e9431ea5 | 588 | { |
8afa13a0 ZR |
589 | struct vmw_buffer_object *vmw_bo; |
590 | int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo); | |
e9431ea5 | 591 | |
60c9ecd7 ZR |
592 | if (!ret) { |
593 | if (!(flags & drm_vmw_synccpu_allow_cs)) { | |
594 | atomic_dec(&vmw_bo->cpu_writers); | |
595 | } | |
596 | ttm_bo_put(&vmw_bo->base); | |
8afa13a0 | 597 | } |
8afa13a0 ZR |
598 | |
599 | return ret; | |
e9431ea5 TH |
600 | } |
601 | ||
602 | ||
/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	int ret;

	/* Require at least one of read/write and reject unknown flags. */
	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_bo_unreference(&vbo);
		if (unlikely(ret != 0)) {
			/*
			 * -ERESTARTSYS is reported to userspace as -EBUSY
			 * here, and neither is logged as an error.
			 */
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
664 | ||
e9431ea5 TH |
665 | /** |
666 | * vmw_bo_unref_ioctl - Generic handle close ioctl. | |
667 | * | |
668 | * @dev: Identifies the drm device. | |
669 | * @data: Pointer to the ioctl argument. | |
670 | * @file_priv: Identifies the caller. | |
671 | * Return: Zero on success, negative error code on error. | |
672 | * | |
673 | * This function checks the ioctl arguments for validity and closes a | |
674 | * handle to a TTM base object, optionally freeing the object. | |
675 | */ | |
676 | int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, | |
677 | struct drm_file *file_priv) | |
678 | { | |
679 | struct drm_vmw_unref_dmabuf_arg *arg = | |
680 | (struct drm_vmw_unref_dmabuf_arg *)data; | |
681 | ||
8afa13a0 ZR |
682 | drm_gem_handle_delete(file_priv, arg->handle); |
683 | return 0; | |
e9431ea5 TH |
684 | } |
685 | ||
686 | ||
687 | /** | |
688 | * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle. | |
689 | * | |
8afa13a0 | 690 | * @filp: The file the handle is registered with. |
e9431ea5 TH |
691 | * @handle: The user buffer object handle |
692 | * @out: Pointer to a where a pointer to the embedded | |
693 | * struct vmw_buffer_object should be placed. | |
e9431ea5 TH |
694 | * Return: Zero on success, Negative error code on error. |
695 | * | |
8afa13a0 | 696 | * The vmw buffer object pointer will be refcounted. |
e9431ea5 | 697 | */ |
8afa13a0 ZR |
698 | int vmw_user_bo_lookup(struct drm_file *filp, |
699 | uint32_t handle, | |
700 | struct vmw_buffer_object **out) | |
e9431ea5 | 701 | { |
8afa13a0 | 702 | struct drm_gem_object *gobj; |
e9431ea5 | 703 | |
8afa13a0 ZR |
704 | gobj = drm_gem_object_lookup(filp, handle); |
705 | if (!gobj) { | |
e9431ea5 TH |
706 | DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", |
707 | (unsigned long)handle); | |
708 | return -ESRCH; | |
709 | } | |
710 | ||
8afa13a0 ZR |
711 | *out = gem_to_vmw_bo(gobj); |
712 | ttm_bo_get(&(*out)->base); | |
713 | drm_gem_object_put(gobj); | |
e9431ea5 TH |
714 | |
715 | return 0; | |
716 | } | |
717 | ||
/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream..
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);
	int ret;

	/*
	 * Either create a new fence (which comes with a reference) or take
	 * an extra reference on the one we were given, so the final
	 * dma_fence_put() below is balanced in both cases.
	 */
	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}
752 | ||
753 | ||
754 | /** | |
755 | * vmw_dumb_create - Create a dumb kms buffer | |
756 | * | |
757 | * @file_priv: Pointer to a struct drm_file identifying the caller. | |
758 | * @dev: Pointer to the drm device. | |
759 | * @args: Pointer to a struct drm_mode_create_dumb structure | |
760 | * Return: Zero on success, negative error code on failure. | |
761 | * | |
762 | * This is a driver callback for the core drm create_dumb functionality. | |
763 | * Note that this is very similar to the vmw_bo_alloc ioctl, except | |
764 | * that the arguments have a different format. | |
765 | */ | |
766 | int vmw_dumb_create(struct drm_file *file_priv, | |
767 | struct drm_device *dev, | |
768 | struct drm_mode_create_dumb *args) | |
769 | { | |
770 | struct vmw_private *dev_priv = vmw_priv(dev); | |
771 | struct vmw_buffer_object *vbo; | |
1c8d537b | 772 | int cpp = DIV_ROUND_UP(args->bpp, 8); |
e9431ea5 TH |
773 | int ret; |
774 | ||
1c8d537b ZR |
775 | switch (cpp) { |
776 | case 1: /* DRM_FORMAT_C8 */ | |
777 | case 2: /* DRM_FORMAT_RGB565 */ | |
778 | case 4: /* DRM_FORMAT_XRGB8888 */ | |
779 | break; | |
780 | default: | |
781 | /* | |
782 | * Dumb buffers don't allow anything else. | |
783 | * This is tested via IGT's dumb_buffers | |
784 | */ | |
785 | return -EINVAL; | |
786 | } | |
787 | ||
788 | args->pitch = args->width * cpp; | |
8afa13a0 | 789 | args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); |
e9431ea5 | 790 | |
8afa13a0 ZR |
791 | ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, |
792 | args->size, &args->handle, | |
793 | &vbo); | |
e9431ea5 | 794 | |
e9431ea5 TH |
795 | return ret; |
796 | } | |
797 | ||
e9431ea5 TH |
/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/*
	 * Only a @bo embedded in a struct vmw_buffer_object carries a
	 * cached kernel map that must be killed before swapout.
	 */
	if (bo_is_vmw(bo))
		vmw_bo_unmap(vmw_buffer_object(bo));
}
812 | ||
813 | ||
814 | /** | |
815 | * vmw_bo_move_notify - TTM move_notify_callback | |
816 | * | |
817 | * @bo: The TTM buffer object about to move. | |
2966141a | 818 | * @mem: The struct ttm_resource indicating to what memory |
e9431ea5 TH |
819 | * region the move is taking place. |
820 | * | |
821 | * Detaches cached maps and device bindings that require that the | |
822 | * buffer doesn't move. | |
823 | */ | |
824 | void vmw_bo_move_notify(struct ttm_buffer_object *bo, | |
2966141a | 825 | struct ttm_resource *mem) |
e9431ea5 TH |
826 | { |
827 | struct vmw_buffer_object *vbo; | |
828 | ||
e9431ea5 | 829 | /* Make sure @bo is embedded in a struct vmw_buffer_object? */ |
298799a2 | 830 | if (!bo_is_vmw(bo)) |
e9431ea5 TH |
831 | return; |
832 | ||
833 | vbo = container_of(bo, struct vmw_buffer_object, base); | |
834 | ||
835 | /* | |
098d7d53 TH |
836 | * Kill any cached kernel maps before move to or from VRAM. |
837 | * With other types of moves, the underlying pages stay the same, | |
838 | * and the map can be kept. | |
e9431ea5 | 839 | */ |
d3116756 | 840 | if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM) |
098d7d53 | 841 | vmw_bo_unmap(vbo); |
e9431ea5 TH |
842 | |
843 | /* | |
844 | * If we're moving a backup MOB out of MOB placement, then make sure we | |
845 | * read back all resource content first, and unbind the MOB from | |
846 | * the resource. | |
847 | */ | |
d3116756 | 848 | if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) |
e9431ea5 TH |
849 | vmw_resource_unbind_list(vbo); |
850 | } |