drivers/gpu/drm/vmwgfx/vmwgfx_resource.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
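
/*
 * Usage sketch (illustrative, not part of the original file): references
 * are taken and dropped in pairs, and vmw_resource_unreference() clears
 * the caller's pointer so a stale reference cannot be reused by accident:
 *
 *	struct vmw_resource *ref = vmw_resource_reference(res);
 *
 *	// ... use ref ...
 *	vmw_resource_unreference(&ref);	// ref is now NULL
 */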

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;

	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}
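
/*
 * Usage sketch (illustrative, not part of the original file), assuming a
 * valid handle: exactly one of the two out-pointers is set on success, and
 * the caller owns the returned reference:
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_dma_buffer *buf = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret)
 *		return ret;
 *	if (surf)
 *		vmw_surface_unreference(&surf);	// it was a surface
 *	else
 *		vmw_dmabuf_unreference(&buf);	// it was a dma buffer
 */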

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
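
/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * for a 1 MiB buffer, num_pages = 256, so the page array accounts for
 * ttm_round_pot(256 * 8) = 2048 bytes, plus another 2048 bytes for the
 * dma_addr_t array when coherent DMA mappings are in use, on top of the
 * rounded struct and TTM backend sizes.
 */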

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		if (nonblock)
			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
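
/*
 * User-space sketch (illustrative, not part of the original file), assuming
 * an open vmwgfx DRM fd and a valid buffer handle; flag and struct names
 * are from the vmwgfx_drm.h uapi header:
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.handle = handle,
 *		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	// ... CPU access to the mmap()ed buffer ...
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */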

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");

		goto out_ret;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
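
/*
 * For orientation (illustrative, not part of the original file): these
 * callbacks serve the core DRM dumb-buffer ioctls, which user space
 * typically drives as
 *
 *	DRM_IOCTL_MODE_CREATE_DUMB  -> vmw_dumb_create()
 *	DRM_IOCTL_MODE_MAP_DUMB     -> vmw_dumb_map_offset()
 *	mmap(..., offset)           -> CPU access to the buffer
 *	DRM_IOCTL_MODE_DESTROY_DUMB -> vmw_dumb_destroy()
 */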

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
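
/*
 * Lifecycle sketch (illustrative, not part of the original file): for
 * command submission a resource is taken off the LRU, validated, and then
 * returned to the LRU on unreserve, roughly as
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	// ... reserve the backup buffer, then ...
 *	ret = vmw_resource_validate(res);
 *	// ... submit commands ...
 *	vmw_resource_unreserve(res, NULL, 0);
 */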

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo:res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false, false);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_dma_buffer *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, false,
				       NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
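
/*
 * Usage sketch (illustrative, not part of the original file): pin/unpin
 * references nest, and while at least one pin reference is held the
 * resource cannot be evicted and its device id stays stable:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	// ... the resource id is now stable ...
 *	vmw_resource_unpin(res);
 */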