drm/vmwgfx: Update device includes for DX device functionality
[linux-2.6-block.git] drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_reg.h"
30#include <drm/ttm/ttm_bo_api.h>
31#include <drm/ttm/ttm_placement.h>
 32
33#define VMW_RES_HT_ORDER 12
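/*
 * VMW_RES_HT_ORDER is a hash-table order, not a bucket count: the
 * per-submission resource hash table (sw_context->res_ht) is created
 * elsewhere in this file with drm_ht_create(), which allocates
 * 1 << order buckets, i.e. 4096 here.
 */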
34
35/**
36 * struct vmw_resource_relocation - Relocation info for resources
37 *
38 * @head: List head for the software context's relocation list.
39 * @res: Non-ref-counted pointer to the resource.
 40 * @offset: Offset into the command buffer, in units of 4-byte words, where
 41 * the id that needs fixup is located.
42 */
43struct vmw_resource_relocation {
44 struct list_head head;
45 const struct vmw_resource *res;
46 unsigned long offset;
47};
48
49/**
50 * struct vmw_resource_val_node - Validation info for resources
51 *
52 * @head: List head for the software context's resource list.
 53 * @hash: Hash entry for quick resource to val_node lookup.
 54 * @res: Ref-counted pointer to the resource.
 55 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 56 * @new_backup: Refcounted pointer to the new backup buffer.
 57 * @staged_bindings: If @res is a context, tracks bindings set up during
 58 * the command batch. Otherwise NULL.
 59 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
60 * @first_usage: Set to true the first time the resource is referenced in
61 * the command stream.
62 * @no_buffer_needed: Resources do not need to allocate buffer backup on
63 * reservation. The command stream will provide one.
64 */
65struct vmw_resource_val_node {
66 struct list_head head;
67 struct drm_hash_item hash;
68 struct vmw_resource *res;
69 struct vmw_dma_buffer *new_backup;
 70 struct vmw_ctx_binding_state *staged_bindings;
71 unsigned long new_backup_offset;
72 bool first_usage;
73 bool no_buffer_needed;
74};
75
c373d4ea
TH
76/**
77 * struct vmw_cmd_entry - Describe a command for the verifier
78 *
79 * @user_allow: Whether allowed from the execbuf ioctl.
80 * @gb_disable: Whether disabled if guest-backed objects are available.
81 * @gb_enable: Whether enabled iff guest-backed objects are available.
82 */
83struct vmw_cmd_entry {
84 int (*func) (struct vmw_private *, struct vmw_sw_context *,
85 SVGA3dCmdHeader *);
86 bool user_allow;
87 bool gb_disable;
88 bool gb_enable;
89};
90
91#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
92 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93 (_gb_disable), (_gb_enable)}
94
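/*
 * Sketch of how VMW_CMD_DEF is intended to be used: the verifier table is
 * defined further down in this file and is indexed by SVGA3d command id;
 * the entry values below are illustrative only.
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 *			    false, false, false),
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *			    true, false, false),
 *	};
 */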
95/**
 96 * vmw_resource_list_unreserve - Unreserve resources previously reserved for
 97 * command submission.
 98 *
 99 * @list: List of resources to unreserve.
100 * @backoff: Whether command submission failed.
101 */
102static void vmw_resource_list_unreserve(struct list_head *list,
103 bool backoff)
104{
105 struct vmw_resource_val_node *val;
106
107 list_for_each_entry(val, list, head) {
108 struct vmw_resource *res = val->res;
109 struct vmw_dma_buffer *new_backup =
110 backoff ? NULL : val->new_backup;
111
173fb7d4
TH
112 /*
113 * Transfer staged context bindings to the
114 * persistent context binding tracker.
115 */
b5c3b1a6 116 if (unlikely(val->staged_bindings)) {
76c7d18b
TH
117 if (!backoff) {
118 vmw_context_binding_state_transfer
119 (val->res, val->staged_bindings);
120 }
b5c3b1a6
TH
121 kfree(val->staged_bindings);
122 val->staged_bindings = NULL;
123 }
c0951b79
TH
124 vmw_resource_unreserve(res, new_backup,
125 val->new_backup_offset);
126 vmw_dmabuf_unreference(&val->new_backup);
127 }
128}
129
130
131/**
132 * vmw_resource_val_add - Add a resource to the software context's
133 * resource list if it's not already on it.
134 *
135 * @sw_context: Pointer to the software context.
136 * @res: Pointer to the resource.
 137 * @p_node: On successful return points to a valid pointer to a
138 * struct vmw_resource_val_node, if non-NULL on entry.
139 */
140static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141 struct vmw_resource *res,
142 struct vmw_resource_val_node **p_node)
143{
144 struct vmw_resource_val_node *node;
145 struct drm_hash_item *hash;
146 int ret;
147
148 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
149 &hash) == 0)) {
150 node = container_of(hash, struct vmw_resource_val_node, hash);
151 node->first_usage = false;
152 if (unlikely(p_node != NULL))
153 *p_node = node;
154 return 0;
155 }
156
157 node = kzalloc(sizeof(*node), GFP_KERNEL);
158 if (unlikely(node == NULL)) {
159 DRM_ERROR("Failed to allocate a resource validation "
160 "entry.\n");
161 return -ENOMEM;
162 }
163
164 node->hash.key = (unsigned long) res;
165 ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
166 if (unlikely(ret != 0)) {
167 DRM_ERROR("Failed to initialize a resource validation "
168 "entry.\n");
169 kfree(node);
170 return ret;
171 }
172 list_add_tail(&node->head, &sw_context->resource_list);
173 node->res = vmw_resource_reference(res);
174 node->first_usage = true;
175
176 if (unlikely(p_node != NULL))
177 *p_node = node;
178
179 return 0;
180}
181
30f82d81
TH
182/**
183 * vmw_resource_context_res_add - Put resources previously bound to a context on
184 * the validation list
185 *
186 * @dev_priv: Pointer to a device private structure
187 * @sw_context: Pointer to a software context used for this command submission
188 * @ctx: Pointer to the context resource
189 *
190 * This function puts all resources that were previously bound to @ctx on
191 * the resource validation list. This is part of the context state reemission
192 */
193static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
194 struct vmw_sw_context *sw_context,
195 struct vmw_resource *ctx)
196{
197 struct list_head *binding_list;
198 struct vmw_ctx_binding *entry;
199 int ret = 0;
200 struct vmw_resource *res;
201
202 mutex_lock(&dev_priv->binding_mutex);
203 binding_list = vmw_context_binding_list(ctx);
204
205 list_for_each_entry(entry, binding_list, ctx_list) {
206 res = vmw_resource_reference_unless_doomed(entry->bi.res);
207 if (unlikely(res == NULL))
208 continue;
209
210 ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
211 vmw_resource_unreference(&res);
212 if (unlikely(ret != 0))
213 break;
214 }
215
216 mutex_unlock(&dev_priv->binding_mutex);
217 return ret;
218}
219
c0951b79
TH
220/**
221 * vmw_resource_relocation_add - Add a relocation to the relocation list
222 *
223 * @list: Pointer to head of relocation list.
224 * @res: The resource.
225 * @offset: Offset into the command buffer currently being parsed where the
226 * id that needs fixup is located. Granularity is 4 bytes.
227 */
228static int vmw_resource_relocation_add(struct list_head *list,
229 const struct vmw_resource *res,
230 unsigned long offset)
231{
232 struct vmw_resource_relocation *rel;
233
234 rel = kmalloc(sizeof(*rel), GFP_KERNEL);
235 if (unlikely(rel == NULL)) {
236 DRM_ERROR("Failed to allocate a resource relocation.\n");
237 return -ENOMEM;
238 }
239
240 rel->res = res;
241 rel->offset = offset;
242 list_add_tail(&rel->head, list);
243
244 return 0;
245}
246
247/**
248 * vmw_resource_relocations_free - Free all relocations on a list
249 *
250 * @list: Pointer to the head of the relocation list.
251 */
252static void vmw_resource_relocations_free(struct list_head *list)
253{
254 struct vmw_resource_relocation *rel, *n;
255
256 list_for_each_entry_safe(rel, n, list, head) {
257 list_del(&rel->head);
258 kfree(rel);
259 }
260}
261
262/**
263 * vmw_resource_relocations_apply - Apply all relocations on a list
264 *
 265 * @cb: Pointer to the start of the command buffer being patched. This need
266 * not be the same buffer as the one being parsed when the relocation
267 * list was built, but the contents must be the same modulo the
268 * resource ids.
269 * @list: Pointer to the head of the relocation list.
270 */
271static void vmw_resource_relocations_apply(uint32_t *cb,
272 struct list_head *list)
273{
274 struct vmw_resource_relocation *rel;
275
d5bde956
TH
276 list_for_each_entry(rel, list, head) {
277 if (likely(rel->res != NULL))
278 cb[rel->offset] = rel->res->id;
279 else
280 cb[rel->offset] = SVGA_3D_CMD_NOP;
281 }
c0951b79
TH
282}
283
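/*
 * Worked example: @offset is expressed in 32-bit words. If the id that needs
 * fixup starts 32 bytes into the command buffer, the relocation is recorded
 * with offset == 8 and vmw_resource_relocations_apply() later rewrites cb[8]
 * with the device id assigned to the resource. Relocations added with a NULL
 * @res (see the shader define/destroy paths below) instead overwrite the
 * command id with SVGA_3D_CMD_NOP, turning the whole command into a no-op.
 */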
fb1d9738
JB
284static int vmw_cmd_invalid(struct vmw_private *dev_priv,
285 struct vmw_sw_context *sw_context,
286 SVGA3dCmdHeader *header)
287{
288 return capable(CAP_SYS_ADMIN) ? : -EINVAL;
289}
290
291static int vmw_cmd_ok(struct vmw_private *dev_priv,
292 struct vmw_sw_context *sw_context,
293 SVGA3dCmdHeader *header)
294{
295 return 0;
296}
297
e2fa3a76
TH
298/**
299 * vmw_bo_to_validate_list - add a bo to a validate list
300 *
301 * @sw_context: The software context used for this command submission batch.
 302 * @vbo: The buffer object to add.
 303 * @validate_as_mob: Validate this buffer as a MOB.
 304 * @p_val_node: If non-NULL, will be updated with the validate node number
 305 * on return.
 306 *
 307 * Returns -EINVAL if the limit of number of buffer objects per command
 308 * submission is reached.
 309 */
 310static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 311 struct vmw_dma_buffer *vbo,
 312 bool validate_as_mob,
 313 uint32_t *p_val_node)
314{
315 uint32_t val_node;
c0951b79 316 struct vmw_validate_buffer *vval_buf;
e2fa3a76 317 struct ttm_validate_buffer *val_buf;
c0951b79
TH
318 struct drm_hash_item *hash;
319 int ret;
e2fa3a76 320
459d0fa7 321 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
c0951b79
TH
322 &hash) == 0)) {
323 vval_buf = container_of(hash, struct vmw_validate_buffer,
324 hash);
96c5f0df
TH
325 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
326 DRM_ERROR("Inconsistent buffer usage.\n");
327 return -EINVAL;
328 }
c0951b79
TH
329 val_buf = &vval_buf->base;
330 val_node = vval_buf - sw_context->val_bufs;
331 } else {
332 val_node = sw_context->cur_val_buf;
333 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
334 DRM_ERROR("Max number of DMA buffers per submission "
335 "exceeded.\n");
336 return -EINVAL;
337 }
338 vval_buf = &sw_context->val_bufs[val_node];
459d0fa7 339 vval_buf->hash.key = (unsigned long) vbo;
c0951b79
TH
340 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
341 if (unlikely(ret != 0)) {
342 DRM_ERROR("Failed to initialize a buffer validation "
343 "entry.\n");
344 return ret;
345 }
346 ++sw_context->cur_val_buf;
347 val_buf = &vval_buf->base;
459d0fa7 348 val_buf->bo = ttm_bo_reference(&vbo->base);
ae9c0af2 349 val_buf->shared = false;
e2fa3a76 350 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
96c5f0df 351 vval_buf->validate_as_mob = validate_as_mob;
e2fa3a76
TH
352 }
353
e2fa3a76
TH
354 if (p_val_node)
355 *p_val_node = val_node;
356
357 return 0;
358}
359
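/*
 * Buffer objects are deduplicated through the same sw_context->res_ht hash
 * table as resources, keyed on the struct vmw_dma_buffer pointer; the value
 * returned through @p_val_node is simply the index of the entry in
 * sw_context->val_bufs, which the relocation code later stores as
 * reloc->index.
 */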
c0951b79
TH
360/**
361 * vmw_resources_reserve - Reserve all resources on the sw_context's
362 * resource list.
363 *
364 * @sw_context: Pointer to the software context.
365 *
366 * Note that since vmware's command submission currently is protected by
367 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
368 * since only a single thread at once will attempt this.
369 */
370static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
fb1d9738 371{
c0951b79 372 struct vmw_resource_val_node *val;
fb1d9738
JB
373 int ret;
374
c0951b79
TH
375 list_for_each_entry(val, &sw_context->resource_list, head) {
376 struct vmw_resource *res = val->res;
fb1d9738 377
1a4b172a 378 ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
c0951b79
TH
379 if (unlikely(ret != 0))
380 return ret;
381
382 if (res->backup) {
459d0fa7 383 struct vmw_dma_buffer *vbo = res->backup;
c0951b79
TH
384
385 ret = vmw_bo_to_validate_list
459d0fa7 386 (sw_context, vbo,
96c5f0df 387 vmw_resource_needs_backup(res), NULL);
c0951b79
TH
388
389 if (unlikely(ret != 0))
390 return ret;
391 }
fb1d9738 392 }
c0951b79
TH
393 return 0;
394}
fb1d9738 395
c0951b79
TH
396/**
397 * vmw_resources_validate - Validate all resources on the sw_context's
398 * resource list.
399 *
400 * @sw_context: Pointer to the software context.
401 *
402 * Before this function is called, all resource backup buffers must have
403 * been validated.
404 */
405static int vmw_resources_validate(struct vmw_sw_context *sw_context)
406{
407 struct vmw_resource_val_node *val;
408 int ret;
409
410 list_for_each_entry(val, &sw_context->resource_list, head) {
411 struct vmw_resource *res = val->res;
f18c8840 412
c0951b79
TH
413 ret = vmw_resource_validate(res);
414 if (unlikely(ret != 0)) {
415 if (ret != -ERESTARTSYS)
416 DRM_ERROR("Failed to validate resource.\n");
417 return ret;
418 }
419 }
f18c8840 420 return 0;
fb1d9738
JB
421}
422
18e4a466
TH
423
424/**
425 * vmw_cmd_res_reloc_add - Add a resource to a software context's
426 * relocation- and validation lists.
427 *
428 * @dev_priv: Pointer to a struct vmw_private identifying the device.
429 * @sw_context: Pointer to the software context.
430 * @res_type: Resource type.
431 * @id_loc: Pointer to where the id that needs translation is located.
432 * @res: Valid pointer to a struct vmw_resource.
 433 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 434 * used for this resource is returned here.
435 */
436static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
437 struct vmw_sw_context *sw_context,
438 enum vmw_res_type res_type,
439 uint32_t *id_loc,
440 struct vmw_resource *res,
441 struct vmw_resource_val_node **p_val)
442{
443 int ret;
444 struct vmw_resource_val_node *node;
445
446 *p_val = NULL;
447 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
448 res,
449 id_loc - sw_context->buf_start);
450 if (unlikely(ret != 0))
9f9cb84f 451 return ret;
18e4a466
TH
452
453 ret = vmw_resource_val_add(sw_context, res, &node);
454 if (unlikely(ret != 0))
9f9cb84f 455 return ret;
18e4a466
TH
456
457 if (res_type == vmw_res_context && dev_priv->has_mob &&
458 node->first_usage) {
459
460 /*
461 * Put contexts first on the list to be able to exit
462 * list traversal for contexts early.
463 */
464 list_del(&node->head);
465 list_add(&node->head, &sw_context->resource_list);
466
467 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
468 if (unlikely(ret != 0))
9f9cb84f 469 return ret;
18e4a466
TH
470 node->staged_bindings =
471 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
472 if (node->staged_bindings == NULL) {
473 DRM_ERROR("Failed to allocate context binding "
474 "information.\n");
9f9cb84f 475 return -ENOMEM;
18e4a466
TH
476 }
477 INIT_LIST_HEAD(&node->staged_bindings->list);
478 }
479
480 if (p_val)
481 *p_val = node;
482
9f9cb84f 483 return 0;
18e4a466
TH
484}
485
486
 487/**
 488 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 489 * on the resource validate list unless it's already there.
 490 *
 491 * @dev_priv: Pointer to a device private structure.
 492 * @sw_context: Pointer to the software context.
 493 * @res_type: Resource type.
 494 * @converter: User-space visible type specific information.
 495 * @id_loc: Pointer to the location in the command buffer currently being
 496 * parsed from where the user-space resource id handle is located.
 497 * @p_val: Pointer to pointer to the resource validation node. Populated
 498 * on exit.
 499 */
d5bde956 500static int
18e4a466
TH
501vmw_cmd_res_check(struct vmw_private *dev_priv,
502 struct vmw_sw_context *sw_context,
503 enum vmw_res_type res_type,
504 const struct vmw_user_resource_conv *converter,
505 uint32_t *id_loc,
506 struct vmw_resource_val_node **p_val)
fb1d9738 507{
c0951b79
TH
508 struct vmw_res_cache_entry *rcache =
509 &sw_context->res_cache[res_type];
be38ab6e 510 struct vmw_resource *res;
c0951b79
TH
511 struct vmw_resource_val_node *node;
512 int ret;
be38ab6e 513
18e4a466 514 if (*id_loc == SVGA3D_INVALID_ID) {
b5c3b1a6
TH
515 if (p_val)
516 *p_val = NULL;
517 if (res_type == vmw_res_context) {
518 DRM_ERROR("Illegal context invalid id.\n");
519 return -EINVAL;
520 }
7a73ba74 521 return 0;
b5c3b1a6 522 }
7a73ba74 523
c0951b79
TH
524 /*
525 * Fastpath in case of repeated commands referencing the same
526 * resource
527 */
7a73ba74 528
18e4a466 529 if (likely(rcache->valid && *id_loc == rcache->handle)) {
c0951b79
TH
530 const struct vmw_resource *res = rcache->res;
531
532 rcache->node->first_usage = false;
533 if (p_val)
534 *p_val = rcache->node;
535
536 return vmw_resource_relocation_add
537 (&sw_context->res_relocations, res,
d5bde956 538 id_loc - sw_context->buf_start);
be38ab6e
TH
539 }
540
c0951b79 541 ret = vmw_user_resource_lookup_handle(dev_priv,
d5bde956 542 sw_context->fp->tfile,
18e4a466 543 *id_loc,
c0951b79
TH
544 converter,
545 &res);
5bb39e81 546 if (unlikely(ret != 0)) {
c0951b79 547 DRM_ERROR("Could not find or use resource 0x%08x.\n",
18e4a466 548 (unsigned) *id_loc);
c0951b79 549 dump_stack();
5bb39e81
TH
550 return ret;
551 }
552
c0951b79
TH
553 rcache->valid = true;
554 rcache->res = res;
18e4a466 555 rcache->handle = *id_loc;
c0951b79 556
18e4a466
TH
557 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
558 res, &node);
c0951b79
TH
559 if (unlikely(ret != 0))
560 goto out_no_reloc;
f18c8840 561
c0951b79
TH
562 rcache->node = node;
563 if (p_val)
564 *p_val = node;
565 vmw_resource_unreference(&res);
f18c8840 566 return 0;
c0951b79
TH
567
568out_no_reloc:
569 BUG_ON(sw_context->error_resource != NULL);
570 sw_context->error_resource = res;
571
572 return ret;
fb1d9738
JB
573}
574
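/*
 * Note that res_cache[] above is a single-entry cache per resource type:
 * consecutive commands that reference the same handle skip the user-space
 * handle lookup entirely and only add another relocation for the command
 * currently being parsed.
 */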
30f82d81
TH
575/**
576 * vmw_rebind_contexts - Rebind all resources previously bound to
577 * referenced contexts.
578 *
579 * @sw_context: Pointer to the software context.
580 *
581 * Rebind context binding points that have been scrubbed because of eviction.
582 */
583static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
584{
585 struct vmw_resource_val_node *val;
586 int ret;
587
588 list_for_each_entry(val, &sw_context->resource_list, head) {
18e4a466
TH
589 if (unlikely(!val->staged_bindings))
590 break;
30f82d81
TH
591
592 ret = vmw_context_rebind_all(val->res);
593 if (unlikely(ret != 0)) {
594 if (ret != -ERESTARTSYS)
595 DRM_ERROR("Failed to rebind context.\n");
596 return ret;
597 }
598 }
599
600 return 0;
601}
602
c0951b79
TH
603/**
604 * vmw_cmd_cid_check - Check a command header for valid context information.
605 *
606 * @dev_priv: Pointer to a device private structure.
607 * @sw_context: Pointer to the software context.
608 * @header: A command header with an embedded user-space context handle.
609 *
610 * Convenience function: Call vmw_cmd_res_check with the user-space context
611 * handle embedded in @header.
612 */
613static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
614 struct vmw_sw_context *sw_context,
615 SVGA3dCmdHeader *header)
616{
617 struct vmw_cid_cmd {
618 SVGA3dCmdHeader header;
8e67bbbc 619 uint32_t cid;
c0951b79
TH
620 } *cmd;
621
622 cmd = container_of(header, struct vmw_cid_cmd, header);
623 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
624 user_context_converter, &cmd->cid, NULL);
625}
fb1d9738
JB
626
627static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
628 struct vmw_sw_context *sw_context,
629 SVGA3dCmdHeader *header)
630{
631 struct vmw_sid_cmd {
632 SVGA3dCmdHeader header;
633 SVGA3dCmdSetRenderTarget body;
634 } *cmd;
b5c3b1a6 635 struct vmw_resource_val_node *ctx_node;
173fb7d4 636 struct vmw_resource_val_node *res_node;
fb1d9738
JB
637 int ret;
638
b5c3b1a6
TH
639 cmd = container_of(header, struct vmw_sid_cmd, header);
640
641 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
642 user_context_converter, &cmd->body.cid,
643 &ctx_node);
fb1d9738
JB
644 if (unlikely(ret != 0))
645 return ret;
646
c0951b79
TH
647 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
648 user_surface_converter,
173fb7d4 649 &cmd->body.target.sid, &res_node);
b5c3b1a6
TH
650 if (unlikely(ret != 0))
651 return ret;
652
653 if (dev_priv->has_mob) {
654 struct vmw_ctx_bindinfo bi;
655
656 bi.ctx = ctx_node->res;
173fb7d4 657 bi.res = res_node ? res_node->res : NULL;
b5c3b1a6
TH
658 bi.bt = vmw_ctx_binding_rt;
659 bi.i1.rt_type = cmd->body.type;
660 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
661 }
662
663 return 0;
fb1d9738
JB
664}
665
666static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
667 struct vmw_sw_context *sw_context,
668 SVGA3dCmdHeader *header)
669{
670 struct vmw_sid_cmd {
671 SVGA3dCmdHeader header;
672 SVGA3dCmdSurfaceCopy body;
673 } *cmd;
674 int ret;
675
676 cmd = container_of(header, struct vmw_sid_cmd, header);
c9146cd9 677
6bf6bf03
TH
678 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
679 user_surface_converter,
680 &cmd->body.src.sid, NULL);
681 if (ret)
682 return ret;
c9146cd9 683
c0951b79
TH
684 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
685 user_surface_converter,
686 &cmd->body.dest.sid, NULL);
fb1d9738
JB
687}
688
689static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
690 struct vmw_sw_context *sw_context,
691 SVGA3dCmdHeader *header)
692{
693 struct vmw_sid_cmd {
694 SVGA3dCmdHeader header;
695 SVGA3dCmdSurfaceStretchBlt body;
696 } *cmd;
697 int ret;
698
699 cmd = container_of(header, struct vmw_sid_cmd, header);
c0951b79
TH
700 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
701 user_surface_converter,
702 &cmd->body.src.sid, NULL);
fb1d9738
JB
703 if (unlikely(ret != 0))
704 return ret;
c0951b79
TH
705 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
706 user_surface_converter,
707 &cmd->body.dest.sid, NULL);
fb1d9738
JB
708}
709
710static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
711 struct vmw_sw_context *sw_context,
712 SVGA3dCmdHeader *header)
713{
714 struct vmw_sid_cmd {
715 SVGA3dCmdHeader header;
716 SVGA3dCmdBlitSurfaceToScreen body;
717 } *cmd;
718
719 cmd = container_of(header, struct vmw_sid_cmd, header);
0cff60c6 720
c0951b79
TH
721 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
722 user_surface_converter,
723 &cmd->body.srcImage.sid, NULL);
fb1d9738
JB
724}
725
726static int vmw_cmd_present_check(struct vmw_private *dev_priv,
727 struct vmw_sw_context *sw_context,
728 SVGA3dCmdHeader *header)
729{
730 struct vmw_sid_cmd {
731 SVGA3dCmdHeader header;
732 SVGA3dCmdPresent body;
733 } *cmd;
734
5bb39e81 735
fb1d9738 736 cmd = container_of(header, struct vmw_sid_cmd, header);
0cff60c6 737
c0951b79
TH
738 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
739 user_surface_converter, &cmd->body.sid,
740 NULL);
fb1d9738
JB
741}
742
e2fa3a76
TH
743/**
744 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
745 *
746 * @dev_priv: The device private structure.
e2fa3a76
TH
747 * @new_query_bo: The new buffer holding query results.
748 * @sw_context: The software context used for this command submission.
749 *
750 * This function checks whether @new_query_bo is suitable for holding
751 * query results, and if another buffer currently is pinned for query
752 * results. If so, the function prepares the state of @sw_context for
753 * switching pinned buffers after successful submission of the current
c0951b79 754 * command batch.
e2fa3a76
TH
755 */
756static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
459d0fa7 757 struct vmw_dma_buffer *new_query_bo,
e2fa3a76
TH
758 struct vmw_sw_context *sw_context)
759{
c0951b79
TH
760 struct vmw_res_cache_entry *ctx_entry =
761 &sw_context->res_cache[vmw_res_context];
e2fa3a76 762 int ret;
c0951b79
TH
763
764 BUG_ON(!ctx_entry->valid);
765 sw_context->last_query_ctx = ctx_entry->res;
e2fa3a76
TH
766
767 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
768
459d0fa7 769 if (unlikely(new_query_bo->base.num_pages > 4)) {
e2fa3a76
TH
770 DRM_ERROR("Query buffer too large.\n");
771 return -EINVAL;
772 }
773
774 if (unlikely(sw_context->cur_query_bo != NULL)) {
c0951b79 775 sw_context->needs_post_query_barrier = true;
e2fa3a76
TH
776 ret = vmw_bo_to_validate_list(sw_context,
777 sw_context->cur_query_bo,
96c5f0df 778 dev_priv->has_mob, NULL);
e2fa3a76
TH
779 if (unlikely(ret != 0))
780 return ret;
781 }
782 sw_context->cur_query_bo = new_query_bo;
783
784 ret = vmw_bo_to_validate_list(sw_context,
785 dev_priv->dummy_query_bo,
96c5f0df 786 dev_priv->has_mob, NULL);
e2fa3a76
TH
787 if (unlikely(ret != 0))
788 return ret;
789
790 }
791
e2fa3a76
TH
792 return 0;
793}
794
795
796/**
797 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
798 *
799 * @dev_priv: The device private structure.
800 * @sw_context: The software context used for this command submission batch.
801 *
802 * This function will check if we're switching query buffers, and will then,
e2fa3a76
TH
803 * issue a dummy occlusion query wait used as a query barrier. When the fence
804 * object following that query wait has signaled, we are sure that all
c0951b79 805 * preceding queries have finished, and the old query buffer can be unpinned.
e2fa3a76
TH
806 * However, since both the new query buffer and the old one are fenced with
 807 * that fence, we can do an asynchronous unpin now, and be sure that the
808 * old query buffer won't be moved until the fence has signaled.
809 *
 810 * As mentioned above, both the new and old query buffers need to be fenced
811 * using a sequence emitted *after* calling this function.
812 */
813static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
814 struct vmw_sw_context *sw_context)
815{
e2fa3a76
TH
816 /*
817 * The validate list should still hold references to all
818 * contexts here.
819 */
820
c0951b79
TH
821 if (sw_context->needs_post_query_barrier) {
822 struct vmw_res_cache_entry *ctx_entry =
823 &sw_context->res_cache[vmw_res_context];
824 struct vmw_resource *ctx;
825 int ret;
e2fa3a76 826
c0951b79
TH
827 BUG_ON(!ctx_entry->valid);
828 ctx = ctx_entry->res;
e2fa3a76
TH
829
830 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
831
832 if (unlikely(ret != 0))
833 DRM_ERROR("Out of fifo space for dummy query.\n");
834 }
835
836 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
837 if (dev_priv->pinned_bo) {
459d0fa7
TH
838 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
839 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
e2fa3a76
TH
840 }
841
c0951b79 842 if (!sw_context->needs_post_query_barrier) {
459d0fa7 843 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
e2fa3a76 844
c0951b79
TH
845 /*
846 * We pin also the dummy_query_bo buffer so that we
847 * don't need to validate it when emitting
848 * dummy queries in context destroy paths.
849 */
e2fa3a76 850
459d0fa7
TH
851 if (!dev_priv->dummy_query_bo_pinned) {
852 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
853 true);
854 dev_priv->dummy_query_bo_pinned = true;
855 }
e2fa3a76 856
c0951b79
TH
857 BUG_ON(sw_context->last_query_ctx == NULL);
858 dev_priv->query_cid = sw_context->last_query_ctx->id;
859 dev_priv->query_cid_valid = true;
860 dev_priv->pinned_bo =
459d0fa7 861 vmw_dmabuf_reference(sw_context->cur_query_bo);
c0951b79 862 }
e2fa3a76
TH
863 }
864}
865
ddcda24e
TH
866/**
 867 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
868 * handle to a MOB id.
869 *
870 * @dev_priv: Pointer to a device private structure.
871 * @sw_context: The software context used for this command batch validation.
872 * @id: Pointer to the user-space handle to be translated.
873 * @vmw_bo_p: Points to a location that, on successful return will carry
874 * a reference-counted pointer to the DMA buffer identified by the
875 * user-space handle in @id.
876 *
877 * This function saves information needed to translate a user-space buffer
878 * handle to a MOB id. The translation does not take place immediately, but
879 * during a call to vmw_apply_relocations(). This function builds a relocation
880 * list and a list of buffers to validate. The former needs to be freed using
881 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
882 * needs to be freed using vmw_clear_validations.
883 */
884static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
885 struct vmw_sw_context *sw_context,
886 SVGAMobId *id,
887 struct vmw_dma_buffer **vmw_bo_p)
888{
889 struct vmw_dma_buffer *vmw_bo = NULL;
ddcda24e
TH
890 uint32_t handle = *id;
891 struct vmw_relocation *reloc;
892 int ret;
893
d5bde956 894 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
ddcda24e
TH
895 if (unlikely(ret != 0)) {
896 DRM_ERROR("Could not find or use MOB buffer.\n");
da5efffc
CIK
897 ret = -EINVAL;
898 goto out_no_reloc;
ddcda24e 899 }
ddcda24e
TH
900
901 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
902 DRM_ERROR("Max number relocations per submission"
903 " exceeded\n");
904 ret = -EINVAL;
905 goto out_no_reloc;
906 }
907
908 reloc = &sw_context->relocs[sw_context->cur_reloc++];
909 reloc->mob_loc = id;
910 reloc->location = NULL;
911
459d0fa7 912 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
ddcda24e
TH
913 if (unlikely(ret != 0))
914 goto out_no_reloc;
915
916 *vmw_bo_p = vmw_bo;
917 return 0;
918
919out_no_reloc:
920 vmw_dmabuf_unreference(&vmw_bo);
da5efffc 921 *vmw_bo_p = NULL;
ddcda24e
TH
922 return ret;
923}
924
e2fa3a76 925/**
c0951b79
TH
 926 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
927 * handle to a valid SVGAGuestPtr
e2fa3a76 928 *
c0951b79
TH
929 * @dev_priv: Pointer to a device private structure.
930 * @sw_context: The software context used for this command batch validation.
931 * @ptr: Pointer to the user-space handle to be translated.
932 * @vmw_bo_p: Points to a location that, on successful return will carry
933 * a reference-counted pointer to the DMA buffer identified by the
 934 * user-space handle in @ptr.
e2fa3a76 935 *
c0951b79
TH
936 * This function saves information needed to translate a user-space buffer
937 * handle to a valid SVGAGuestPtr. The translation does not take place
938 * immediately, but during a call to vmw_apply_relocations().
939 * This function builds a relocation list and a list of buffers to validate.
940 * The former needs to be freed using either vmw_apply_relocations() or
941 * vmw_free_relocations(). The latter needs to be freed using
942 * vmw_clear_validations.
e2fa3a76 943 */
4e4ddd47
TH
944static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
945 struct vmw_sw_context *sw_context,
946 SVGAGuestPtr *ptr,
947 struct vmw_dma_buffer **vmw_bo_p)
fb1d9738 948{
fb1d9738 949 struct vmw_dma_buffer *vmw_bo = NULL;
4e4ddd47 950 uint32_t handle = ptr->gmrId;
fb1d9738 951 struct vmw_relocation *reloc;
4e4ddd47 952 int ret;
fb1d9738 953
d5bde956 954 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
fb1d9738
JB
955 if (unlikely(ret != 0)) {
956 DRM_ERROR("Could not find or use GMR region.\n");
da5efffc
CIK
957 ret = -EINVAL;
958 goto out_no_reloc;
fb1d9738 959 }
fb1d9738
JB
960
961 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 962 DRM_ERROR("Max number of relocations per submission"
 963 " exceeded\n");
964 ret = -EINVAL;
965 goto out_no_reloc;
966 }
967
968 reloc = &sw_context->relocs[sw_context->cur_reloc++];
4e4ddd47 969 reloc->location = ptr;
fb1d9738 970
459d0fa7 971 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
e2fa3a76 972 if (unlikely(ret != 0))
fb1d9738 973 goto out_no_reloc;
fb1d9738 974
4e4ddd47
TH
975 *vmw_bo_p = vmw_bo;
976 return 0;
977
978out_no_reloc:
979 vmw_dmabuf_unreference(&vmw_bo);
da5efffc 980 *vmw_bo_p = NULL;
4e4ddd47
TH
981 return ret;
982}
983
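/*
 * Sketch of the deferred fixup (vmw_apply_relocations() is defined further
 * down in this file): after the buffers have been validated and placed, each
 * stored relocation is patched roughly as
 *
 *	if (reloc->mob_loc)
 *		*reloc->mob_loc = bo->mem.start;	- the MOB id
 *	else
 *		reloc->location->gmrId = bo->mem.start;	- the GMR id
 *
 * which is why neither vmw_translate_mob_ptr() nor vmw_translate_guest_ptr()
 * touches the command stream directly.
 */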
ddcda24e
TH
984/**
985 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
986 *
987 * @dev_priv: Pointer to a device private struct.
988 * @sw_context: The software context used for this command submission.
989 * @header: Pointer to the command header in the command stream.
990 */
991static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
992 struct vmw_sw_context *sw_context,
993 SVGA3dCmdHeader *header)
994{
995 struct vmw_begin_gb_query_cmd {
996 SVGA3dCmdHeader header;
997 SVGA3dCmdBeginGBQuery q;
998 } *cmd;
999
1000 cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1001 header);
1002
1003 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1004 user_context_converter, &cmd->q.cid,
1005 NULL);
1006}
1007
c0951b79
TH
1008/**
1009 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
1010 *
1011 * @dev_priv: Pointer to a device private struct.
1012 * @sw_context: The software context used for this command submission.
1013 * @header: Pointer to the command header in the command stream.
1014 */
1015static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1016 struct vmw_sw_context *sw_context,
1017 SVGA3dCmdHeader *header)
1018{
1019 struct vmw_begin_query_cmd {
1020 SVGA3dCmdHeader header;
1021 SVGA3dCmdBeginQuery q;
1022 } *cmd;
1023
1024 cmd = container_of(header, struct vmw_begin_query_cmd,
1025 header);
1026
ddcda24e
TH
1027 if (unlikely(dev_priv->has_mob)) {
1028 struct {
1029 SVGA3dCmdHeader header;
1030 SVGA3dCmdBeginGBQuery q;
1031 } gb_cmd;
1032
1033 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1034
1035 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1036 gb_cmd.header.size = cmd->header.size;
1037 gb_cmd.q.cid = cmd->q.cid;
1038 gb_cmd.q.type = cmd->q.type;
1039
1040 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1041 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1042 }
1043
c0951b79
TH
1044 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1045 user_context_converter, &cmd->q.cid,
1046 NULL);
1047}
1048
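/*
 * The legacy-to-guest-backed rewrite above relies on the GB variant of the
 * command having exactly the same size as the legacy one (hence the
 * BUG_ON(sizeof(gb_cmd) != sizeof(*cmd))), so the conversion can be done in
 * place with memcpy(). vmw_cmd_end_query() and vmw_cmd_wait_query() below
 * use the same pattern.
 */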
ddcda24e
TH
1049/**
1050 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1051 *
1052 * @dev_priv: Pointer to a device private struct.
1053 * @sw_context: The software context used for this command submission.
1054 * @header: Pointer to the command header in the command stream.
1055 */
1056static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1057 struct vmw_sw_context *sw_context,
1058 SVGA3dCmdHeader *header)
1059{
1060 struct vmw_dma_buffer *vmw_bo;
1061 struct vmw_query_cmd {
1062 SVGA3dCmdHeader header;
1063 SVGA3dCmdEndGBQuery q;
1064 } *cmd;
1065 int ret;
1066
1067 cmd = container_of(header, struct vmw_query_cmd, header);
1068 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1069 if (unlikely(ret != 0))
1070 return ret;
1071
1072 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1073 &cmd->q.mobid,
1074 &vmw_bo);
1075 if (unlikely(ret != 0))
1076 return ret;
1077
459d0fa7 1078 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
ddcda24e
TH
1079
1080 vmw_dmabuf_unreference(&vmw_bo);
1081 return ret;
1082}
1083
c0951b79
TH
1084/**
1085 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1086 *
1087 * @dev_priv: Pointer to a device private struct.
1088 * @sw_context: The software context used for this command submission.
1089 * @header: Pointer to the command header in the command stream.
1090 */
4e4ddd47
TH
1091static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1092 struct vmw_sw_context *sw_context,
1093 SVGA3dCmdHeader *header)
1094{
1095 struct vmw_dma_buffer *vmw_bo;
1096 struct vmw_query_cmd {
1097 SVGA3dCmdHeader header;
1098 SVGA3dCmdEndQuery q;
1099 } *cmd;
1100 int ret;
1101
1102 cmd = container_of(header, struct vmw_query_cmd, header);
ddcda24e
TH
1103 if (dev_priv->has_mob) {
1104 struct {
1105 SVGA3dCmdHeader header;
1106 SVGA3dCmdEndGBQuery q;
1107 } gb_cmd;
1108
1109 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1110
1111 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1112 gb_cmd.header.size = cmd->header.size;
1113 gb_cmd.q.cid = cmd->q.cid;
1114 gb_cmd.q.type = cmd->q.type;
1115 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1116 gb_cmd.q.offset = cmd->q.guestResult.offset;
1117
1118 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1119 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1120 }
1121
4e4ddd47
TH
1122 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1123 if (unlikely(ret != 0))
1124 return ret;
1125
1126 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1127 &cmd->q.guestResult,
1128 &vmw_bo);
1129 if (unlikely(ret != 0))
1130 return ret;
1131
459d0fa7 1132 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
e2fa3a76 1133
4e4ddd47 1134 vmw_dmabuf_unreference(&vmw_bo);
e2fa3a76 1135 return ret;
4e4ddd47 1136}
fb1d9738 1137
ddcda24e
TH
1138/**
1139 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1140 *
1141 * @dev_priv: Pointer to a device private struct.
1142 * @sw_context: The software context used for this command submission.
1143 * @header: Pointer to the command header in the command stream.
1144 */
1145static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1146 struct vmw_sw_context *sw_context,
1147 SVGA3dCmdHeader *header)
1148{
1149 struct vmw_dma_buffer *vmw_bo;
1150 struct vmw_query_cmd {
1151 SVGA3dCmdHeader header;
1152 SVGA3dCmdWaitForGBQuery q;
1153 } *cmd;
1154 int ret;
1155
1156 cmd = container_of(header, struct vmw_query_cmd, header);
1157 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1158 if (unlikely(ret != 0))
1159 return ret;
1160
1161 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1162 &cmd->q.mobid,
1163 &vmw_bo);
1164 if (unlikely(ret != 0))
1165 return ret;
1166
1167 vmw_dmabuf_unreference(&vmw_bo);
1168 return 0;
1169}
1170
1171/**
c0951b79
TH
1172 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1173 *
1174 * @dev_priv: Pointer to a device private struct.
1175 * @sw_context: The software context used for this command submission.
1176 * @header: Pointer to the command header in the command stream.
1177 */
4e4ddd47
TH
1178static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1179 struct vmw_sw_context *sw_context,
1180 SVGA3dCmdHeader *header)
1181{
1182 struct vmw_dma_buffer *vmw_bo;
1183 struct vmw_query_cmd {
1184 SVGA3dCmdHeader header;
1185 SVGA3dCmdWaitForQuery q;
1186 } *cmd;
1187 int ret;
1188
1189 cmd = container_of(header, struct vmw_query_cmd, header);
ddcda24e
TH
1190 if (dev_priv->has_mob) {
1191 struct {
1192 SVGA3dCmdHeader header;
1193 SVGA3dCmdWaitForGBQuery q;
1194 } gb_cmd;
1195
1196 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1197
1198 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1199 gb_cmd.header.size = cmd->header.size;
1200 gb_cmd.q.cid = cmd->q.cid;
1201 gb_cmd.q.type = cmd->q.type;
1202 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1203 gb_cmd.q.offset = cmd->q.guestResult.offset;
1204
1205 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1206 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1207 }
1208
4e4ddd47
TH
1209 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1210 if (unlikely(ret != 0))
1211 return ret;
1212
1213 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1214 &cmd->q.guestResult,
1215 &vmw_bo);
1216 if (unlikely(ret != 0))
1217 return ret;
1218
1219 vmw_dmabuf_unreference(&vmw_bo);
1220 return 0;
1221}
1222
4e4ddd47
TH
1223static int vmw_cmd_dma(struct vmw_private *dev_priv,
1224 struct vmw_sw_context *sw_context,
1225 SVGA3dCmdHeader *header)
1226{
1227 struct vmw_dma_buffer *vmw_bo = NULL;
4e4ddd47
TH
1228 struct vmw_surface *srf = NULL;
1229 struct vmw_dma_cmd {
1230 SVGA3dCmdHeader header;
1231 SVGA3dCmdSurfaceDMA dma;
1232 } *cmd;
1233 int ret;
cbd75e97
TH
1234 SVGA3dCmdSurfaceDMASuffix *suffix;
1235 uint32_t bo_size;
4e4ddd47
TH
1236
1237 cmd = container_of(header, struct vmw_dma_cmd, header);
cbd75e97
TH
1238 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1239 header->size - sizeof(*suffix));
1240
 1241 /* Make sure device and verifier stay in sync. */
1242 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1243 DRM_ERROR("Invalid DMA suffix size.\n");
1244 return -EINVAL;
1245 }
1246
4e4ddd47
TH
1247 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1248 &cmd->dma.guest.ptr,
1249 &vmw_bo);
1250 if (unlikely(ret != 0))
1251 return ret;
1252
cbd75e97
TH
1253 /* Make sure DMA doesn't cross BO boundaries. */
1254 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1255 if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1256 DRM_ERROR("Invalid DMA offset.\n");
1257 return -EINVAL;
1258 }
1259
1260 bo_size -= cmd->dma.guest.ptr.offset;
1261 if (unlikely(suffix->maximumOffset > bo_size))
1262 suffix->maximumOffset = bo_size;
1263
c0951b79
TH
1264 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1265 user_surface_converter, &cmd->dma.host.sid,
1266 NULL);
5bb39e81 1267 if (unlikely(ret != 0)) {
c0951b79
TH
1268 if (unlikely(ret != -ERESTARTSYS))
1269 DRM_ERROR("could not find surface for DMA.\n");
1270 goto out_no_surface;
5bb39e81
TH
1271 }
1272
c0951b79 1273 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
f18c8840 1274
d5bde956
TH
1275 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1276 header);
fb1d9738 1277
c0951b79 1278out_no_surface:
fb1d9738
JB
1279 vmw_dmabuf_unreference(&vmw_bo);
1280 return ret;
1281}
1282
7a73ba74
TH
1283static int vmw_cmd_draw(struct vmw_private *dev_priv,
1284 struct vmw_sw_context *sw_context,
1285 SVGA3dCmdHeader *header)
1286{
1287 struct vmw_draw_cmd {
1288 SVGA3dCmdHeader header;
1289 SVGA3dCmdDrawPrimitives body;
1290 } *cmd;
1291 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1292 (unsigned long)header + sizeof(*cmd));
1293 SVGA3dPrimitiveRange *range;
1294 uint32_t i;
1295 uint32_t maxnum;
1296 int ret;
1297
1298 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1299 if (unlikely(ret != 0))
1300 return ret;
1301
1302 cmd = container_of(header, struct vmw_draw_cmd, header);
1303 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1304
1305 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1306 DRM_ERROR("Illegal number of vertex declarations.\n");
1307 return -EINVAL;
1308 }
1309
1310 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
c0951b79
TH
1311 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1312 user_surface_converter,
1313 &decl->array.surfaceId, NULL);
7a73ba74
TH
1314 if (unlikely(ret != 0))
1315 return ret;
1316 }
1317
1318 maxnum = (header->size - sizeof(cmd->body) -
1319 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1320 if (unlikely(cmd->body.numRanges > maxnum)) {
1321 DRM_ERROR("Illegal number of index ranges.\n");
1322 return -EINVAL;
1323 }
1324
1325 range = (SVGA3dPrimitiveRange *) decl;
1326 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
c0951b79
TH
1327 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1328 user_surface_converter,
1329 &range->indexArray.surfaceId, NULL);
7a73ba74
TH
1330 if (unlikely(ret != 0))
1331 return ret;
1332 }
1333 return 0;
1334}
1335
1336
1337static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1338 struct vmw_sw_context *sw_context,
1339 SVGA3dCmdHeader *header)
1340{
1341 struct vmw_tex_state_cmd {
1342 SVGA3dCmdHeader header;
1343 SVGA3dCmdSetTextureState state;
b5c3b1a6 1344 } *cmd;
7a73ba74
TH
1345
1346 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1347 ((unsigned long) header + header->size + sizeof(header));
1348 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1349 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
b5c3b1a6 1350 struct vmw_resource_val_node *ctx_node;
173fb7d4 1351 struct vmw_resource_val_node *res_node;
7a73ba74
TH
1352 int ret;
1353
b5c3b1a6
TH
1354 cmd = container_of(header, struct vmw_tex_state_cmd,
1355 header);
1356
1357 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1358 user_context_converter, &cmd->state.cid,
1359 &ctx_node);
7a73ba74
TH
1360 if (unlikely(ret != 0))
1361 return ret;
1362
1363 for (; cur_state < last_state; ++cur_state) {
1364 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1365 continue;
1366
c0951b79
TH
1367 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1368 user_surface_converter,
173fb7d4 1369 &cur_state->value, &res_node);
7a73ba74
TH
1370 if (unlikely(ret != 0))
1371 return ret;
b5c3b1a6
TH
1372
1373 if (dev_priv->has_mob) {
1374 struct vmw_ctx_bindinfo bi;
1375
1376 bi.ctx = ctx_node->res;
173fb7d4 1377 bi.res = res_node ? res_node->res : NULL;
b5c3b1a6
TH
1378 bi.bt = vmw_ctx_binding_tex;
1379 bi.i1.texture_stage = cur_state->stage;
1380 vmw_context_binding_add(ctx_node->staged_bindings,
1381 &bi);
1382 }
7a73ba74
TH
1383 }
1384
1385 return 0;
1386}
1387
4084fb89
JB
1388static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1389 struct vmw_sw_context *sw_context,
1390 void *buf)
1391{
1392 struct vmw_dma_buffer *vmw_bo;
1393 int ret;
1394
1395 struct {
1396 uint32_t header;
1397 SVGAFifoCmdDefineGMRFB body;
1398 } *cmd = buf;
1399
1400 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1401 &cmd->body.ptr,
1402 &vmw_bo);
1403 if (unlikely(ret != 0))
1404 return ret;
1405
1406 vmw_dmabuf_unreference(&vmw_bo);
1407
1408 return ret;
1409}
1410
a97e2192
TH
1411/**
1412 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1413 *
1414 * @dev_priv: Pointer to a device private struct.
1415 * @sw_context: The software context being used for this batch.
1416 * @res_type: The resource type.
1417 * @converter: Information about user-space binding for this resource type.
1418 * @res_id: Pointer to the user-space resource handle in the command stream.
1419 * @buf_id: Pointer to the user-space backup buffer handle in the command
1420 * stream.
1421 * @backup_offset: Offset of backup into MOB.
1422 *
1423 * This function prepares for registering a switch of backup buffers
1424 * in the resource metadata just prior to unreserving.
1425 */
1426static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1427 struct vmw_sw_context *sw_context,
1428 enum vmw_res_type res_type,
1429 const struct vmw_user_resource_conv
1430 *converter,
1431 uint32_t *res_id,
1432 uint32_t *buf_id,
1433 unsigned long backup_offset)
1434{
1435 int ret;
1436 struct vmw_dma_buffer *dma_buf;
1437 struct vmw_resource_val_node *val_node;
1438
1439 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1440 converter, res_id, &val_node);
1441 if (unlikely(ret != 0))
1442 return ret;
1443
1444 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1445 if (unlikely(ret != 0))
1446 return ret;
1447
1448 if (val_node->first_usage)
1449 val_node->no_buffer_needed = true;
1450
1451 vmw_dmabuf_unreference(&val_node->new_backup);
1452 val_node->new_backup = dma_buf;
1453 val_node->new_backup_offset = backup_offset;
1454
1455 return 0;
1456}
1457
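/*
 * Note: vmw_cmd_switch_backup() only records the new backup buffer and offset
 * in the validation node; the switch itself is committed after command
 * submission, when vmw_resource_list_unreserve() passes @new_backup and
 * @new_backup_offset on to vmw_resource_unreserve().
 */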
1458/**
1459 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1460 * command
1461 *
1462 * @dev_priv: Pointer to a device private struct.
1463 * @sw_context: The software context being used for this batch.
1464 * @header: Pointer to the command header in the command stream.
1465 */
1466static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1467 struct vmw_sw_context *sw_context,
1468 SVGA3dCmdHeader *header)
1469{
1470 struct vmw_bind_gb_surface_cmd {
1471 SVGA3dCmdHeader header;
1472 SVGA3dCmdBindGBSurface body;
1473 } *cmd;
1474
1475 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1476
1477 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1478 user_surface_converter,
1479 &cmd->body.sid, &cmd->body.mobid,
1480 0);
1481}
1482
1483/**
1484 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1485 * command
1486 *
1487 * @dev_priv: Pointer to a device private struct.
1488 * @sw_context: The software context being used for this batch.
1489 * @header: Pointer to the command header in the command stream.
1490 */
1491static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1492 struct vmw_sw_context *sw_context,
1493 SVGA3dCmdHeader *header)
1494{
1495 struct vmw_gb_surface_cmd {
1496 SVGA3dCmdHeader header;
1497 SVGA3dCmdUpdateGBImage body;
1498 } *cmd;
1499
1500 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1501
1502 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1503 user_surface_converter,
1504 &cmd->body.image.sid, NULL);
1505}
1506
1507/**
1508 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1509 * command
1510 *
1511 * @dev_priv: Pointer to a device private struct.
1512 * @sw_context: The software context being used for this batch.
1513 * @header: Pointer to the command header in the command stream.
1514 */
1515static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1516 struct vmw_sw_context *sw_context,
1517 SVGA3dCmdHeader *header)
1518{
1519 struct vmw_gb_surface_cmd {
1520 SVGA3dCmdHeader header;
1521 SVGA3dCmdUpdateGBSurface body;
1522 } *cmd;
1523
1524 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1525
1526 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1527 user_surface_converter,
1528 &cmd->body.sid, NULL);
1529}
1530
1531/**
1532 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1533 * command
1534 *
1535 * @dev_priv: Pointer to a device private struct.
1536 * @sw_context: The software context being used for this batch.
1537 * @header: Pointer to the command header in the command stream.
1538 */
1539static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1540 struct vmw_sw_context *sw_context,
1541 SVGA3dCmdHeader *header)
1542{
1543 struct vmw_gb_surface_cmd {
1544 SVGA3dCmdHeader header;
1545 SVGA3dCmdReadbackGBImage body;
1546 } *cmd;
1547
1548 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1549
1550 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1551 user_surface_converter,
1552 &cmd->body.image.sid, NULL);
1553}
1554
1555/**
1556 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1557 * command
1558 *
1559 * @dev_priv: Pointer to a device private struct.
1560 * @sw_context: The software context being used for this batch.
1561 * @header: Pointer to the command header in the command stream.
1562 */
1563static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1564 struct vmw_sw_context *sw_context,
1565 SVGA3dCmdHeader *header)
1566{
1567 struct vmw_gb_surface_cmd {
1568 SVGA3dCmdHeader header;
1569 SVGA3dCmdReadbackGBSurface body;
1570 } *cmd;
1571
1572 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1573
1574 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1575 user_surface_converter,
1576 &cmd->body.sid, NULL);
1577}
1578
1579/**
1580 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1581 * command
1582 *
1583 * @dev_priv: Pointer to a device private struct.
1584 * @sw_context: The software context being used for this batch.
1585 * @header: Pointer to the command header in the command stream.
1586 */
1587static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1588 struct vmw_sw_context *sw_context,
1589 SVGA3dCmdHeader *header)
1590{
1591 struct vmw_gb_surface_cmd {
1592 SVGA3dCmdHeader header;
1593 SVGA3dCmdInvalidateGBImage body;
1594 } *cmd;
1595
1596 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1597
1598 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1599 user_surface_converter,
1600 &cmd->body.image.sid, NULL);
1601}
1602
1603/**
1604 * vmw_cmd_invalidate_gb_surface - Validate an
1605 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1606 *
1607 * @dev_priv: Pointer to a device private struct.
1608 * @sw_context: The software context being used for this batch.
1609 * @header: Pointer to the command header in the command stream.
1610 */
1611static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1612 struct vmw_sw_context *sw_context,
1613 SVGA3dCmdHeader *header)
1614{
1615 struct vmw_gb_surface_cmd {
1616 SVGA3dCmdHeader header;
1617 SVGA3dCmdInvalidateGBSurface body;
1618 } *cmd;
1619
1620 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1621
1622 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1623 user_surface_converter,
1624 &cmd->body.sid, NULL);
1625}
1626
d5bde956
TH
1627
1628/**
1629 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1630 * command
1631 *
1632 * @dev_priv: Pointer to a device private struct.
1633 * @sw_context: The software context being used for this batch.
1634 * @header: Pointer to the command header in the command stream.
1635 */
1636static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1637 struct vmw_sw_context *sw_context,
1638 SVGA3dCmdHeader *header)
1639{
1640 struct vmw_shader_define_cmd {
1641 SVGA3dCmdHeader header;
1642 SVGA3dCmdDefineShader body;
1643 } *cmd;
1644 int ret;
1645 size_t size;
18e4a466 1646 struct vmw_resource_val_node *val;
d5bde956
TH
1647
1648 cmd = container_of(header, struct vmw_shader_define_cmd,
1649 header);
1650
1651 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1652 user_context_converter, &cmd->body.cid,
18e4a466 1653 &val);
d5bde956
TH
1654 if (unlikely(ret != 0))
1655 return ret;
1656
1657 if (unlikely(!dev_priv->has_mob))
1658 return 0;
1659
1660 size = cmd->header.size - sizeof(cmd->body);
18e4a466
TH
1661 ret = vmw_compat_shader_add(dev_priv,
1662 vmw_context_res_man(val->res),
d5bde956
TH
1663 cmd->body.shid, cmd + 1,
1664 cmd->body.type, size,
18e4a466 1665 &sw_context->staged_cmd_res);
d5bde956
TH
1666 if (unlikely(ret != 0))
1667 return ret;
1668
1669 return vmw_resource_relocation_add(&sw_context->res_relocations,
1670 NULL, &cmd->header.id -
1671 sw_context->buf_start);
1674}
1675
1676/**
1677 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1678 * command
1679 *
1680 * @dev_priv: Pointer to a device private struct.
1681 * @sw_context: The software context being used for this batch.
1682 * @header: Pointer to the command header in the command stream.
1683 */
1684static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1685 struct vmw_sw_context *sw_context,
1686 SVGA3dCmdHeader *header)
1687{
1688 struct vmw_shader_destroy_cmd {
1689 SVGA3dCmdHeader header;
1690 SVGA3dCmdDestroyShader body;
1691 } *cmd;
1692 int ret;
18e4a466 1693 struct vmw_resource_val_node *val;
d5bde956
TH
1694
1695 cmd = container_of(header, struct vmw_shader_destroy_cmd,
1696 header);
1697
1698 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1699 user_context_converter, &cmd->body.cid,
18e4a466 1700 &val);
d5bde956
TH
1701 if (unlikely(ret != 0))
1702 return ret;
1703
1704 if (unlikely(!dev_priv->has_mob))
1705 return 0;
1706
18e4a466 1707 ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
d5bde956
TH
1708 cmd->body.shid,
1709 cmd->body.type,
18e4a466 1710 &sw_context->staged_cmd_res);
d5bde956
TH
1711 if (unlikely(ret != 0))
1712 return ret;
1713
1714 return vmw_resource_relocation_add(&sw_context->res_relocations,
1715 NULL, &cmd->header.id -
1716 sw_context->buf_start);
1719}
1720
c0951b79
TH
1721/**
1722 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1723 * command
1724 *
1725 * @dev_priv: Pointer to a device private struct.
1726 * @sw_context: The software context being used for this batch.
1727 * @header: Pointer to the command header in the command stream.
1728 */
1729static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1730 struct vmw_sw_context *sw_context,
1731 SVGA3dCmdHeader *header)
1732{
1733 struct vmw_set_shader_cmd {
1734 SVGA3dCmdHeader header;
1735 SVGA3dCmdSetShader body;
1736 } *cmd;
18e4a466
TH
1737 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1738 struct vmw_ctx_bindinfo bi;
1739 struct vmw_resource *res = NULL;
c0951b79
TH
1740 int ret;
1741
1742 cmd = container_of(header, struct vmw_set_shader_cmd,
1743 header);
1744
b5c3b1a6
TH
1745 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1746 user_context_converter, &cmd->body.cid,
1747 &ctx_node);
c0951b79
TH
1748 if (unlikely(ret != 0))
1749 return ret;
1750
18e4a466
TH
1751 if (!dev_priv->has_mob)
1752 return 0;
1753
1754 if (cmd->body.shid != SVGA3D_INVALID_ID) {
1755 res = vmw_compat_shader_lookup
1756 (vmw_context_res_man(ctx_node->res),
1757 cmd->body.shid,
1758 cmd->body.type);
1759
1760 if (!IS_ERR(res)) {
1761 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1762 vmw_res_shader,
1763 &cmd->body.shid, res,
1764 &res_node);
1765 vmw_resource_unreference(&res);
1766 if (unlikely(ret != 0))
1767 return ret;
1768 }
1769 }
1770
1771 if (!res_node) {
1772 ret = vmw_cmd_res_check(dev_priv, sw_context,
1773 vmw_res_shader,
1774 user_shader_converter,
1775 &cmd->body.shid, &res_node);
b5c3b1a6
TH
1776 if (unlikely(ret != 0))
1777 return ret;
b5c3b1a6 1778 }
c74c162f 1779
18e4a466
TH
1780 bi.ctx = ctx_node->res;
1781 bi.res = res_node ? res_node->res : NULL;
1782 bi.bt = vmw_ctx_binding_shader;
1783 bi.i1.shader_type = cmd->body.type;
1784 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
c0951b79
TH
1785}
1786
0ccbbae4
TH
1787/**
1788 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1789 * command
1790 *
1791 * @dev_priv: Pointer to a device private struct.
1792 * @sw_context: The software context being used for this batch.
1793 * @header: Pointer to the command header in the command stream.
1794 */
1795static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1796 struct vmw_sw_context *sw_context,
1797 SVGA3dCmdHeader *header)
1798{
1799 struct vmw_set_shader_const_cmd {
1800 SVGA3dCmdHeader header;
1801 SVGA3dCmdSetShaderConst body;
1802 } *cmd;
1803 int ret;
1804
1805 cmd = container_of(header, struct vmw_set_shader_const_cmd,
1806 header);
1807
1808 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1809 user_context_converter, &cmd->body.cid,
1810 NULL);
1811 if (unlikely(ret != 0))
1812 return ret;
1813
1814 if (dev_priv->has_mob)
1815 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1816
1817 return 0;
1818}
1819
c74c162f
TH
1820/**
1821 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1822 * command
1823 *
1824 * @dev_priv: Pointer to a device private struct.
1825 * @sw_context: The software context being used for this batch.
1826 * @header: Pointer to the command header in the command stream.
1827 */
1828static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1829 struct vmw_sw_context *sw_context,
1830 SVGA3dCmdHeader *header)
1831{
1832 struct vmw_bind_gb_shader_cmd {
1833 SVGA3dCmdHeader header;
1834 SVGA3dCmdBindGBShader body;
1835 } *cmd;
1836
1837 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1838 header);
1839
1840 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1841 user_shader_converter,
1842 &cmd->body.shid, &cmd->body.mobid,
1843 cmd->body.offsetInBytes);
1844}
1845
4084fb89
JB
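/**
 * vmw_cmd_check_not_3d - Validate a non-3D SVGA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the space remaining in the command stream. On output,
 * the size of the validated command.
 *
 * Only a small set of 2D commands is accepted, and only from kernel
 * submissions. GMRFB definitions are forwarded to
 * vmw_cmd_check_define_gmrfb() for further checking.
 */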
1846static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1847 struct vmw_sw_context *sw_context,
1848 void *buf, uint32_t *size)
1849{
1850 uint32_t size_remaining = *size;
4084fb89
JB
1851 uint32_t cmd_id;
1852
b9eb1a61 1853 cmd_id = ((uint32_t *)buf)[0];
4084fb89
JB
1854 switch (cmd_id) {
1855 case SVGA_CMD_UPDATE:
1856 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
4084fb89
JB
1857 break;
1858 case SVGA_CMD_DEFINE_GMRFB:
1859 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1860 break;
1861 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1862 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1863 break;
1864 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1865 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1866 break;
1867 default:
1868 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1869 return -EINVAL;
1870 }
1871
1872 if (*size > size_remaining) {
1873 DRM_ERROR("Invalid SVGA command (size mismatch):"
1874 " %u.\n", cmd_id);
1875 return -EINVAL;
1876 }
1877
0cff60c6 1878 if (unlikely(!sw_context->kernel)) {
4084fb89
JB
1879 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1880 return -EPERM;
1881 }
1882
1883 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1884 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1885
1886 return 0;
1887}
fb1d9738 1888
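/*
 * Per-command verifier table, indexed by SVGA3D command id relative to
 * SVGA_3D_CMD_BASE. Each VMW_CMD_DEF() entry supplies the verifier
 * function together with the user_allow, gb_disable and gb_enable flags
 * consulted by vmw_cmd_check() before the command is dispatched.
 */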
4fbd9d2e 1889static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
c373d4ea
TH
1890 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1891 false, false, false),
1892 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1893 false, false, false),
1894 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1895 true, false, false),
1896 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1897 true, false, false),
1898 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1899 true, false, false),
1900 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1901 false, false, false),
1902 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1903 false, false, false),
1904 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1905 true, false, false),
1906 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1907 true, false, false),
1908 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1909 true, false, false),
fb1d9738 1910 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
c373d4ea
TH
1911 &vmw_cmd_set_render_target_check, true, false, false),
1912 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1913 true, false, false),
1914 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1915 true, false, false),
1916 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1917 true, false, false),
1918 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1919 true, false, false),
1920 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1921 true, false, false),
1922 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1923 true, false, false),
1924 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1925 true, false, false),
1926 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1927 false, false, false),
d5bde956
TH
1928 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1929 true, false, false),
1930 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1931 true, false, false),
c373d4ea
TH
1932 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1933 true, false, false),
0ccbbae4
TH
1934 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1935 true, false, false),
c373d4ea
TH
1936 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1937 true, false, false),
1938 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1939 true, false, false),
1940 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1941 true, false, false),
1942 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1943 true, false, false),
1944 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1945 true, false, false),
1946 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1947 true, false, false),
fb1d9738 1948 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
c373d4ea
TH
1949 &vmw_cmd_blt_surf_screen_check, false, false, false),
1950 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1951 false, false, false),
1952 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1953 false, false, false),
1954 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1955 false, false, false),
1956 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1957 false, false, false),
1958 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1959 false, false, false),
1960 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1961 false, false, false),
1962 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1963 false, false, false),
1964 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1965 false, false, false),
1966 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1967 false, false, false),
1968 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1969 false, false, false),
1970 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1971 false, false, false),
1972 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1973 false, false, false),
1974 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1975 false, false, false),
1976 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1977 false, false, true),
1978 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1979 false, false, true),
1980 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1981 false, false, true),
1982 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1983 false, false, true),
c373d4ea
TH
1984 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1985 false, false, true),
1986 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1987 false, false, true),
1988 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1989 false, false, true),
1990 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1991 true, false, true),
1992 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1993 false, false, true),
1994 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1995 true, false, true),
a97e2192 1996 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
c373d4ea 1997 &vmw_cmd_update_gb_surface, true, false, true),
a97e2192 1998 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
c373d4ea 1999 &vmw_cmd_readback_gb_image, true, false, true),
a97e2192 2000 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
c373d4ea 2001 &vmw_cmd_readback_gb_surface, true, false, true),
a97e2192 2002 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
c373d4ea 2003 &vmw_cmd_invalidate_gb_image, true, false, true),
a97e2192 2004 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
c373d4ea
TH
2005 &vmw_cmd_invalidate_gb_surface, true, false, true),
2006 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2007 false, false, true),
2008 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2009 false, false, true),
2010 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2011 false, false, true),
2012 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2013 false, false, true),
2014 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2015 false, false, true),
2016 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2017 false, false, true),
2018 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2019 true, false, true),
2020 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2021 false, false, true),
f2a0dcb1 2022 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
8ba07315 2023 false, false, false),
c373d4ea
TH
2024 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2025 true, false, true),
2026 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2027 true, false, true),
2028 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2029 true, false, true),
2030 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2031 true, false, true),
2032 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2033 false, false, true),
2034 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2035 false, false, true),
2036 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2037 false, false, true),
2038 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2039 false, false, true),
2040 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2041 false, false, true),
2042 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2043 false, false, true),
2044 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2045 false, false, true),
2046 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2047 false, false, true),
2048 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2049 false, false, true),
2050 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2051 false, false, true),
2052 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2053 true, false, true)
fb1d9738
JB
2054};
2055
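/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command header in the command stream.
 * @size: On input, the space remaining in the command stream. On output,
 * the total size of the validated command including its header.
 *
 * Non-3D commands are handed off to vmw_cmd_check_not_3d(). 3D commands
 * are looked up in vmw_cmd_entries[] and checked against the caller's
 * privileges and the device's guest-backed capability before the
 * per-command verifier is called.
 */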
2056static int vmw_cmd_check(struct vmw_private *dev_priv,
2057 struct vmw_sw_context *sw_context,
2058 void *buf, uint32_t *size)
2059{
2060 uint32_t cmd_id;
7a73ba74 2061 uint32_t size_remaining = *size;
fb1d9738
JB
2062 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2063 int ret;
c373d4ea
TH
2064 const struct vmw_cmd_entry *entry;
2065 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
fb1d9738 2066
b9eb1a61 2067 cmd_id = ((uint32_t *)buf)[0];
4084fb89
JB
 2068 	/* Handle any non-3D commands */
2069 if (unlikely(cmd_id < SVGA_CMD_MAX))
2070 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2071
fb1d9738 2072
b9eb1a61
TH
2073 cmd_id = header->id;
2074 *size = header->size + sizeof(SVGA3dCmdHeader);
fb1d9738
JB
2075
2076 cmd_id -= SVGA_3D_CMD_BASE;
7a73ba74 2077 if (unlikely(*size > size_remaining))
c373d4ea 2078 goto out_invalid;
7a73ba74 2079
fb1d9738 2080 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
c373d4ea
TH
2081 goto out_invalid;
2082
2083 entry = &vmw_cmd_entries[cmd_id];
36e952c1
TH
2084 if (unlikely(!entry->func))
2085 goto out_invalid;
2086
c373d4ea
TH
2087 if (unlikely(!entry->user_allow && !sw_context->kernel))
2088 goto out_privileged;
2089
2090 if (unlikely(entry->gb_disable && gb))
2091 goto out_old;
2092
2093 if (unlikely(entry->gb_enable && !gb))
2094 goto out_new;
fb1d9738 2095
c373d4ea 2096 ret = entry->func(dev_priv, sw_context, header);
fb1d9738 2097 if (unlikely(ret != 0))
c373d4ea 2098 goto out_invalid;
fb1d9738
JB
2099
2100 return 0;
c373d4ea
TH
2101out_invalid:
2102 DRM_ERROR("Invalid SVGA3D command: %d\n",
2103 cmd_id + SVGA_3D_CMD_BASE);
2104 return -EINVAL;
2105out_privileged:
2106 DRM_ERROR("Privileged SVGA3D command: %d\n",
2107 cmd_id + SVGA_3D_CMD_BASE);
2108 return -EPERM;
2109out_old:
2110 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2111 cmd_id + SVGA_3D_CMD_BASE);
2112 return -EINVAL;
2113out_new:
2114 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
fb1d9738
JB
2115 cmd_id + SVGA_3D_CMD_BASE);
2116 return -EINVAL;
2117}
2118
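/**
 * vmw_cmd_check_all - Validate every command in a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Records the batch start for later relocation fixups and then runs
 * vmw_cmd_check() on each command in turn, failing if the individual
 * command sizes do not add up exactly to @size.
 */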
2119static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2120 struct vmw_sw_context *sw_context,
922ade0d 2121 void *buf,
be38ab6e 2122 uint32_t size)
fb1d9738
JB
2123{
2124 int32_t cur_size = size;
2125 int ret;
2126
c0951b79
TH
2127 sw_context->buf_start = buf;
2128
fb1d9738 2129 while (cur_size > 0) {
7a73ba74 2130 size = cur_size;
fb1d9738
JB
2131 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2132 if (unlikely(ret != 0))
2133 return ret;
2134 buf = (void *)((unsigned long) buf + size);
2135 cur_size -= size;
2136 }
2137
2138 if (unlikely(cur_size != 0)) {
2139 DRM_ERROR("Command verifier out of sync.\n");
2140 return -EINVAL;
2141 }
2142
2143 return 0;
2144}
2145
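/* Discard all recorded buffer relocations by resetting the count. */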
2146static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2147{
2148 sw_context->cur_reloc = 0;
2149}
2150
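/**
 * vmw_apply_relocations - Patch buffer addresses into the command batch
 *
 * @sw_context: The software context being used for this batch.
 *
 * After the buffer objects have been validated and placed, rewrite each
 * recorded relocation with the buffer's final VRAM offset, GMR id or MOB
 * id, and then discard the relocation list.
 */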
2151static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2152{
2153 uint32_t i;
2154 struct vmw_relocation *reloc;
2155 struct ttm_validate_buffer *validate;
2156 struct ttm_buffer_object *bo;
2157
2158 for (i = 0; i < sw_context->cur_reloc; ++i) {
2159 reloc = &sw_context->relocs[i];
c0951b79 2160 validate = &sw_context->val_bufs[reloc->index].base;
fb1d9738 2161 bo = validate->bo;
c0951b79
TH
2162 switch (bo->mem.mem_type) {
2163 case TTM_PL_VRAM:
135cba0d
TH
2164 reloc->location->offset += bo->offset;
2165 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
c0951b79
TH
2166 break;
2167 case VMW_PL_GMR:
135cba0d 2168 reloc->location->gmrId = bo->mem.start;
c0951b79 2169 break;
ddcda24e
TH
2170 case VMW_PL_MOB:
2171 *reloc->mob_loc = bo->mem.start;
2172 break;
c0951b79
TH
2173 default:
2174 BUG();
2175 }
fb1d9738
JB
2176 }
2177 vmw_free_relocations(sw_context);
2178}
2179
c0951b79
TH
2180/**
 2181  * vmw_resource_list_unreference - Free up a resource list and unreference
2182 * all resources referenced by it.
2183 *
2184 * @list: The resource list.
2185 */
2186static void vmw_resource_list_unreference(struct list_head *list)
2187{
2188 struct vmw_resource_val_node *val, *val_next;
2189
2190 /*
2191 * Drop references to resources held during command submission.
2192 */
2193
2194 list_for_each_entry_safe(val, val_next, list, head) {
2195 list_del_init(&val->head);
2196 vmw_resource_unreference(&val->res);
b5c3b1a6
TH
2197 if (unlikely(val->staged_bindings))
2198 kfree(val->staged_bindings);
c0951b79
TH
2199 kfree(val);
2200 }
2201}
2202
fb1d9738
JB
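/**
 * vmw_clear_validations - Tear down validation bookkeeping
 *
 * @sw_context: The software context being used for this batch.
 *
 * Drops the buffer object references taken during command submission and
 * removes both buffers and resources from the software context's hash
 * table.
 */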
2203static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2204{
c0951b79
TH
2205 struct vmw_validate_buffer *entry, *next;
2206 struct vmw_resource_val_node *val;
fb1d9738 2207
be38ab6e
TH
2208 /*
2209 * Drop references to DMA buffers held during command submission.
2210 */
fb1d9738 2211 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
c0951b79
TH
2212 base.head) {
2213 list_del(&entry->base.head);
2214 ttm_bo_unref(&entry->base.bo);
2215 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
fb1d9738
JB
2216 sw_context->cur_val_buf--;
2217 }
2218 BUG_ON(sw_context->cur_val_buf != 0);
be38ab6e 2219
c0951b79
TH
2220 list_for_each_entry(val, &sw_context->resource_list, head)
2221 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
fb1d9738
JB
2222}
2223
1a4b172a
TH
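/**
 * vmw_validate_single_buffer - Place a single buffer object for submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @interruptible: Whether to perform waits interruptibly.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Pinned buffers are left where they are. Otherwise the buffer is placed
 * as a MOB if requested, or in VRAM or a GMR, falling back to evicting
 * VRAM contents as a last resort.
 */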
2224int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2225 struct ttm_buffer_object *bo,
2226 bool interruptible,
2227 bool validate_as_mob)
fb1d9738 2228{
459d0fa7
TH
2229 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
2230 base);
fb1d9738
JB
2231 int ret;
2232
459d0fa7 2233 if (vbo->pin_count > 0)
e2fa3a76
TH
2234 return 0;
2235
96c5f0df 2236 if (validate_as_mob)
1a4b172a
TH
2237 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
2238 false);
96c5f0df 2239
8ba5152a 2240 	/*
135cba0d
TH
2241 * Put BO in VRAM if there is space, otherwise as a GMR.
2242 * If there is no space in VRAM and GMR ids are all used up,
2243 * start evicting GMRs to make room. If the DMA buffer can't be
2244 * used as a GMR, this will return -ENOMEM.
8ba5152a
TH
2245 */
2246
1a4b172a
TH
2247 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
2248 false);
3d3a5b32 2249 if (likely(ret == 0 || ret == -ERESTARTSYS))
fb1d9738
JB
2250 return ret;
2251
8ba5152a
TH
 2252 	/*
2253 * If that failed, try VRAM again, this time evicting
2254 * previous contents.
2255 */
fb1d9738 2256
1a4b172a 2257 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
fb1d9738
JB
2258 return ret;
2259}
2260
fb1d9738
JB
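/**
 * vmw_validate_buffers - Validate all buffers on the validation list
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 *
 * Runs vmw_validate_single_buffer() on each entry of the validation list
 * and returns the first error encountered, if any.
 */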
2261static int vmw_validate_buffers(struct vmw_private *dev_priv,
2262 struct vmw_sw_context *sw_context)
2263{
c0951b79 2264 struct vmw_validate_buffer *entry;
fb1d9738
JB
2265 int ret;
2266
c0951b79 2267 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
96c5f0df 2268 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
1a4b172a 2269 true,
96c5f0df 2270 entry->validate_as_mob);
fb1d9738
JB
2271 if (unlikely(ret != 0))
2272 return ret;
2273 }
2274 return 0;
2275}
2276
be38ab6e
TH
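/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context being used for this batch.
 * @size: The required bounce buffer size in bytes.
 *
 * Grows the bounce buffer in page-aligned steps of roughly 1.5x until it
 * can hold @size bytes. The previous contents are not preserved. Returns
 * 0 on success or -ENOMEM if the allocation fails.
 */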
2277static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2278 uint32_t size)
2279{
2280 if (likely(sw_context->cmd_bounce_size >= size))
2281 return 0;
2282
2283 if (sw_context->cmd_bounce_size == 0)
2284 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2285
2286 while (sw_context->cmd_bounce_size < size) {
2287 sw_context->cmd_bounce_size =
2288 PAGE_ALIGN(sw_context->cmd_bounce_size +
2289 (sw_context->cmd_bounce_size >> 1));
2290 }
2291
2292 if (sw_context->cmd_bounce != NULL)
2293 vfree(sw_context->cmd_bounce);
2294
2295 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2296
2297 if (sw_context->cmd_bounce == NULL) {
2298 DRM_ERROR("Failed to allocate command bounce buffer.\n");
2299 sw_context->cmd_bounce_size = 0;
2300 return -ENOMEM;
2301 }
2302
2303 return 0;
2304}
2305
ae2a1040
TH
2306/**
2307 * vmw_execbuf_fence_commands - create and submit a command stream fence
2308 *
2309 * Creates a fence object and submits a command stream marker.
 2310  * If this fails for some reason, we sync the fifo and return NULL.
2311 * It is then safe to fence buffers with a NULL pointer.
6070e9fa
JB
2312 *
 2313  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 2314  * user-space handle is created; otherwise no handle is created.
ae2a1040
TH
2315 */
2316
2317int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2318 struct vmw_private *dev_priv,
2319 struct vmw_fence_obj **p_fence,
2320 uint32_t *p_handle)
2321{
2322 uint32_t sequence;
2323 int ret;
2324 bool synced = false;
2325
6070e9fa
JB
2326 /* p_handle implies file_priv. */
2327 BUG_ON(p_handle != NULL && file_priv == NULL);
ae2a1040
TH
2328
2329 ret = vmw_fifo_send_fence(dev_priv, &sequence);
2330 if (unlikely(ret != 0)) {
2331 DRM_ERROR("Fence submission error. Syncing.\n");
2332 synced = true;
2333 }
2334
2335 if (p_handle != NULL)
2336 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
c060a4e1 2337 sequence, p_fence, p_handle);
ae2a1040 2338 else
c060a4e1 2339 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
ae2a1040
TH
2340
2341 if (unlikely(ret != 0 && !synced)) {
2342 (void) vmw_fallback_wait(dev_priv, false, false,
2343 sequence, false,
2344 VMW_FENCE_WAIT_TIMEOUT);
2345 *p_fence = NULL;
2346 }
2347
2348 return 0;
2349}
2350
8bf445ce
TH
2351/**
2352 * vmw_execbuf_copy_fence_user - copy fence object information to
2353 * user-space.
2354 *
2355 * @dev_priv: Pointer to a vmw_private struct.
2356 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2357 * @ret: Return value from fence object creation.
2358 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2359 * which the information should be copied.
 2360  * @fence: Pointer to the fence object.
2361 * @fence_handle: User-space fence handle.
2362 *
2363 * This function copies fence information to user-space. If copying fails,
 2364  * the user-space struct drm_vmw_fence_rep::error member is hopefully
2365 * left untouched, and if it's preloaded with an -EFAULT by user-space,
2366 * the error will hopefully be detected.
2367 * Also if copying fails, user-space will be unable to signal the fence
2368 * object so we wait for it immediately, and then unreference the
2369 * user-space reference.
2370 */
57c5ee79 2371void
8bf445ce
TH
2372vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2373 struct vmw_fpriv *vmw_fp,
2374 int ret,
2375 struct drm_vmw_fence_rep __user *user_fence_rep,
2376 struct vmw_fence_obj *fence,
2377 uint32_t fence_handle)
2378{
2379 struct drm_vmw_fence_rep fence_rep;
2380
2381 if (user_fence_rep == NULL)
2382 return;
2383
80d9b24a
DC
2384 memset(&fence_rep, 0, sizeof(fence_rep));
2385
8bf445ce
TH
2386 fence_rep.error = ret;
2387 if (ret == 0) {
2388 BUG_ON(fence == NULL);
2389
2390 fence_rep.handle = fence_handle;
2298e804 2391 fence_rep.seqno = fence->base.seqno;
8bf445ce
TH
2392 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2393 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2394 }
2395
2396 /*
2397 * copy_to_user errors will be detected by user space not
2398 * seeing fence_rep::error filled in. Typically
2399 * user-space would have pre-set that member to -EFAULT.
2400 */
2401 ret = copy_to_user(user_fence_rep, &fence_rep,
2402 sizeof(fence_rep));
2403
2404 /*
2405 * User-space lost the fence object. We need to sync
2406 * and unreference the handle.
2407 */
2408 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2409 ttm_ref_object_base_unref(vmw_fp->tfile,
2410 fence_handle, TTM_REF_USAGE);
2411 DRM_ERROR("Fence copy error. Syncing.\n");
c060a4e1 2412 (void) vmw_fence_obj_wait(fence, false, false,
8bf445ce
TH
2413 VMW_FENCE_WAIT_TIMEOUT);
2414 }
2415}
2416
3eab3d9e
TH
2417/**
2418 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
2419 * the fifo.
2420 *
2421 * @dev_priv: Pointer to a device private structure.
2422 * @kernel_commands: Pointer to the unpatched command batch.
2423 * @command_size: Size of the unpatched command batch.
2424 * @sw_context: Structure holding the relocation lists.
2425 *
2426 * Side effects: If this function returns 0, then the command batch
2427 * pointed to by @kernel_commands will have been modified.
2428 */
2429static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
2430 void *kernel_commands,
2431 u32 command_size,
2432 struct vmw_sw_context *sw_context)
2433{
2434 void *cmd = vmw_fifo_reserve(dev_priv, command_size);
2435
2436 if (!cmd) {
2437 DRM_ERROR("Failed reserving fifo space for commands.\n");
2438 return -ENOMEM;
2439 }
18e4a466 2440
3eab3d9e
TH
2441 vmw_apply_relocations(sw_context);
2442 memcpy(cmd, kernel_commands, command_size);
2443 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2444 vmw_resource_relocations_free(&sw_context->res_relocations);
2445 vmw_fifo_commit(dev_priv, command_size);
2446
2447 return 0;
2448}
2449
2450/**
2451 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
2452 * the command buffer manager.
2453 *
2454 * @dev_priv: Pointer to a device private structure.
2455 * @header: Opaque handle to the command buffer allocation.
2456 * @command_size: Size of the unpatched command batch.
2457 * @sw_context: Structure holding the relocation lists.
2458 *
2459 * Side effects: If this function returns 0, then the command buffer
2460 * represented by @header will have been modified.
2461 */
2462static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
2463 struct vmw_cmdbuf_header *header,
2464 u32 command_size,
2465 struct vmw_sw_context *sw_context)
2466{
2467 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
2468 SVGA3D_INVALID_ID, false, header);
2469
2470 vmw_apply_relocations(sw_context);
2471 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2472 vmw_resource_relocations_free(&sw_context->res_relocations);
2473 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
2474
2475 return 0;
2476}
2477
2478/**
2479 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
2480 * submission using a command buffer.
2481 *
2482 * @dev_priv: Pointer to a device private structure.
2483 * @user_commands: User-space pointer to the commands to be submitted.
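 * @kernel_commands: Kernel pointer to the command batch, or NULL if the
 * commands have not yet been copied in.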
2484 * @command_size: Size of the unpatched command batch.
2485 * @header: Out parameter returning the opaque pointer to the command buffer.
2486 *
2487 * This function checks whether we can use the command buffer manager for
2488 * submission and if so, creates a command buffer of suitable size and
2489 * copies the user data into that buffer.
2490 *
2491 * On successful return, the function returns a pointer to the data in the
2492 * command buffer and *@header is set to non-NULL.
2493 * If command buffers could not be used, the function will return the value
2494 * of @kernel_commands on function call. That value may be NULL. In that case,
2495 * the value of *@header will be set to NULL.
2496 * If an error is encountered, the function will return a pointer error value.
2497 * If the function is interrupted by a signal while sleeping, it will return
 2498  * -ERESTARTSYS cast to a pointer error value.
2499 */
b9eb1a61
TH
2500static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
2501 void __user *user_commands,
2502 void *kernel_commands,
2503 u32 command_size,
2504 struct vmw_cmdbuf_header **header)
3eab3d9e
TH
2505{
2506 size_t cmdbuf_size;
2507 int ret;
2508
2509 *header = NULL;
2510 if (!dev_priv->cman || kernel_commands)
2511 return kernel_commands;
2512
2513 if (command_size > SVGA_CB_MAX_SIZE) {
2514 DRM_ERROR("Command buffer is too large.\n");
2515 return ERR_PTR(-EINVAL);
2516 }
2517
2518 /* If possible, add a little space for fencing. */
2519 cmdbuf_size = command_size + 512;
2520 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
2521 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
2522 true, header);
2523 if (IS_ERR(kernel_commands))
2524 return kernel_commands;
2525
2526 ret = copy_from_user(kernel_commands, user_commands,
2527 command_size);
2528 if (ret) {
2529 DRM_ERROR("Failed copying commands.\n");
2530 vmw_cmdbuf_header_free(*header);
2531 *header = NULL;
2532 return ERR_PTR(-EFAULT);
2533 }
2534
2535 return kernel_commands;
2536}
18e4a466 2537
922ade0d
TH
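/**
 * vmw_execbuf_process - Validate and submit a command batch
 *
 * @file_priv: Pointer to the struct drm_file of the caller.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, throttle the submission against this fifo lag.
 * @user_fence_rep: User-space address to receive fence information, or NULL.
 * @out_fence: If non-NULL, the fence object created for this submission is
 * returned here instead of being unreferenced.
 *
 * Copies or wraps the command batch, verifies every command, reserves and
 * validates all referenced resources and buffers, applies relocations,
 * submits the batch through the fifo or the command buffer manager, and
 * finally fences the submission. Returns 0 on success or a negative error
 * code on failure.
 */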
2538int vmw_execbuf_process(struct drm_file *file_priv,
2539 struct vmw_private *dev_priv,
2540 void __user *user_commands,
2541 void *kernel_commands,
2542 uint32_t command_size,
2543 uint64_t throttle_us,
bb1bd2f4
JB
2544 struct drm_vmw_fence_rep __user *user_fence_rep,
2545 struct vmw_fence_obj **out_fence)
fb1d9738 2546{
fb1d9738 2547 struct vmw_sw_context *sw_context = &dev_priv->ctx;
bb1bd2f4 2548 struct vmw_fence_obj *fence = NULL;
c0951b79
TH
2549 struct vmw_resource *error_resource;
2550 struct list_head resource_list;
3eab3d9e 2551 struct vmw_cmdbuf_header *header;
ecff665f 2552 struct ww_acquire_ctx ticket;
ae2a1040 2553 uint32_t handle;
922ade0d 2554 int ret;
fb1d9738 2555
3eab3d9e
TH
2556 if (throttle_us) {
2557 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2558 throttle_us);
2559
2560 if (ret)
2561 return ret;
2562 }
2563
2564 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
2565 kernel_commands, command_size,
2566 &header);
2567 if (IS_ERR(kernel_commands))
2568 return PTR_ERR(kernel_commands);
2569
922ade0d 2570 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3eab3d9e
TH
2571 if (ret) {
2572 ret = -ERESTARTSYS;
2573 goto out_free_header;
2574 }
fb1d9738 2575
3eab3d9e 2576 sw_context->kernel = false;
922ade0d 2577 if (kernel_commands == NULL) {
922ade0d
TH
2578 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2579 if (unlikely(ret != 0))
2580 goto out_unlock;
fb1d9738 2581
fb1d9738 2582
922ade0d
TH
2583 ret = copy_from_user(sw_context->cmd_bounce,
2584 user_commands, command_size);
2585
2586 if (unlikely(ret != 0)) {
2587 ret = -EFAULT;
2588 DRM_ERROR("Failed copying commands.\n");
2589 goto out_unlock;
2590 }
2591 kernel_commands = sw_context->cmd_bounce;
3eab3d9e 2592 } else if (!header)
922ade0d 2593 sw_context->kernel = true;
fb1d9738 2594
d5bde956 2595 sw_context->fp = vmw_fpriv(file_priv);
fb1d9738
JB
2596 sw_context->cur_reloc = 0;
2597 sw_context->cur_val_buf = 0;
f18c8840 2598 INIT_LIST_HEAD(&sw_context->resource_list);
e2fa3a76 2599 sw_context->cur_query_bo = dev_priv->pinned_bo;
c0951b79
TH
2600 sw_context->last_query_ctx = NULL;
2601 sw_context->needs_post_query_barrier = false;
2602 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
fb1d9738 2603 INIT_LIST_HEAD(&sw_context->validate_nodes);
c0951b79
TH
2604 INIT_LIST_HEAD(&sw_context->res_relocations);
2605 if (!sw_context->res_ht_initialized) {
2606 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2607 if (unlikely(ret != 0))
2608 goto out_unlock;
2609 sw_context->res_ht_initialized = true;
2610 }
18e4a466 2611 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
c0951b79 2612 INIT_LIST_HEAD(&resource_list);
922ade0d
TH
2613 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2614 command_size);
fb1d9738 2615 if (unlikely(ret != 0))
cf5e3413 2616 goto out_err_nores;
be38ab6e 2617
c0951b79
TH
2618 ret = vmw_resources_reserve(sw_context);
2619 if (unlikely(ret != 0))
cf5e3413 2620 goto out_err_nores;
c0951b79 2621
aa35071c
CK
2622 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2623 true, NULL);
fb1d9738
JB
2624 if (unlikely(ret != 0))
2625 goto out_err;
2626
2627 ret = vmw_validate_buffers(dev_priv, sw_context);
2628 if (unlikely(ret != 0))
2629 goto out_err;
2630
c0951b79
TH
2631 ret = vmw_resources_validate(sw_context);
2632 if (unlikely(ret != 0))
2633 goto out_err;
1925d456 2634
173fb7d4
TH
2635 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2636 if (unlikely(ret != 0)) {
2637 ret = -ERESTARTSYS;
2638 goto out_err;
2639 }
2640
30f82d81
TH
2641 if (dev_priv->has_mob) {
2642 ret = vmw_rebind_contexts(sw_context);
2643 if (unlikely(ret != 0))
b2ad9881 2644 goto out_unlock_binding;
30f82d81
TH
2645 }
2646
3eab3d9e
TH
2647 if (!header) {
2648 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
2649 command_size, sw_context);
2650 } else {
2651 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
2652 sw_context);
2653 header = NULL;
1925d456 2654 }
3eab3d9e
TH
2655 if (ret)
2656 goto out_unlock_binding;
fb1d9738 2657
e2fa3a76 2658 vmw_query_bo_switch_commit(dev_priv, sw_context);
ae2a1040
TH
2659 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2660 &fence,
2661 (user_fence_rep) ? &handle : NULL);
fb1d9738
JB
2662 /*
2663 * This error is harmless, because if fence submission fails,
ae2a1040
TH
2664 * vmw_fifo_send_fence will sync. The error will be propagated to
2665 * user-space in @fence_rep
fb1d9738
JB
2666 */
2667
2668 if (ret != 0)
2669 DRM_ERROR("Fence submission error. Syncing.\n");
2670
c0951b79 2671 vmw_resource_list_unreserve(&sw_context->resource_list, false);
173fb7d4
TH
2672 mutex_unlock(&dev_priv->binding_mutex);
2673
ecff665f 2674 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
ae2a1040 2675 (void *) fence);
fb1d9738 2676
c0951b79
TH
2677 if (unlikely(dev_priv->pinned_bo != NULL &&
2678 !dev_priv->query_cid_valid))
2679 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2680
ae2a1040 2681 vmw_clear_validations(sw_context);
8bf445ce
TH
2682 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2683 user_fence_rep, fence, handle);
fb1d9738 2684
bb1bd2f4
JB
2685 /* Don't unreference when handing fence out */
2686 if (unlikely(out_fence != NULL)) {
2687 *out_fence = fence;
2688 fence = NULL;
2689 } else if (likely(fence != NULL)) {
ae2a1040 2690 vmw_fence_obj_unreference(&fence);
bb1bd2f4 2691 }
fb1d9738 2692
c0951b79 2693 list_splice_init(&sw_context->resource_list, &resource_list);
18e4a466 2694 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
922ade0d 2695 mutex_unlock(&dev_priv->cmdbuf_mutex);
c0951b79
TH
2696
2697 /*
2698 * Unreference resources outside of the cmdbuf_mutex to
2699 * avoid deadlocks in resource destruction paths.
2700 */
2701 vmw_resource_list_unreference(&resource_list);
2702
fb1d9738 2703 return 0;
922ade0d 2704
173fb7d4
TH
2705out_unlock_binding:
2706 mutex_unlock(&dev_priv->binding_mutex);
fb1d9738 2707out_err:
ecff665f 2708 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
cf5e3413 2709out_err_nores:
c0951b79 2710 vmw_resource_list_unreserve(&sw_context->resource_list, true);
cf5e3413
TH
2711 vmw_resource_relocations_free(&sw_context->res_relocations);
2712 vmw_free_relocations(sw_context);
fb1d9738 2713 vmw_clear_validations(sw_context);
c0951b79
TH
2714 if (unlikely(dev_priv->pinned_bo != NULL &&
2715 !dev_priv->query_cid_valid))
2716 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
fb1d9738 2717out_unlock:
c0951b79
TH
2718 list_splice_init(&sw_context->resource_list, &resource_list);
2719 error_resource = sw_context->error_resource;
2720 sw_context->error_resource = NULL;
18e4a466 2721 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
fb1d9738 2722 mutex_unlock(&dev_priv->cmdbuf_mutex);
c0951b79
TH
2723
2724 /*
2725 * Unreference resources outside of the cmdbuf_mutex to
2726 * avoid deadlocks in resource destruction paths.
2727 */
2728 vmw_resource_list_unreference(&resource_list);
2729 if (unlikely(error_resource != NULL))
2730 vmw_resource_unreference(&error_resource);
3eab3d9e
TH
2731out_free_header:
2732 if (header)
2733 vmw_cmdbuf_header_free(header);
c0951b79 2734
922ade0d
TH
2735 return ret;
2736}
2737
e2fa3a76
TH
2738/**
2739 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2740 *
2741 * @dev_priv: The device private structure.
2742 *
2743 * This function is called to idle the fifo and unpin the query buffer
2744 * if the normal way to do this hits an error, which should typically be
2745 * extremely rare.
2746 */
2747static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2748{
2749 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2750
2751 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
459d0fa7
TH
2752 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2753 if (dev_priv->dummy_query_bo_pinned) {
2754 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2755 dev_priv->dummy_query_bo_pinned = false;
2756 }
e2fa3a76
TH
2757}
2758
2759
2760/**
c0951b79 2761 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
e2fa3a76
TH
2762 * query bo.
2763 *
2764 * @dev_priv: The device private structure.
c0951b79
TH
2765 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2766 * _after_ a query barrier that flushes all queries touching the current
2767 * buffer pointed to by @dev_priv->pinned_bo
e2fa3a76
TH
2768 *
2769 * This function should be used to unpin the pinned query bo, or
2770 * as a query barrier when we need to make sure that all queries have
2771 * finished before the next fifo command. (For example on hardware
2772 * context destructions where the hardware may otherwise leak unfinished
2773 * queries).
2774 *
 2775  * This function does not return any failure codes, but makes attempts
2776 * to do safe unpinning in case of errors.
2777 *
2778 * The function will synchronize on the previous query barrier, and will
2779 * thus not finish until that barrier has executed.
c0951b79
TH
2780 *
 2781  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2782 * before calling this function.
e2fa3a76 2783 */
c0951b79
TH
2784void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2785 struct vmw_fence_obj *fence)
e2fa3a76
TH
2786{
2787 int ret = 0;
2788 struct list_head validate_list;
2789 struct ttm_validate_buffer pinned_val, query_val;
c0951b79 2790 struct vmw_fence_obj *lfence = NULL;
ecff665f 2791 struct ww_acquire_ctx ticket;
e2fa3a76
TH
2792
2793 if (dev_priv->pinned_bo == NULL)
2794 goto out_unlock;
2795
e2fa3a76
TH
2796 INIT_LIST_HEAD(&validate_list);
2797
459d0fa7 2798 pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
ae9c0af2 2799 pinned_val.shared = false;
e2fa3a76
TH
2800 list_add_tail(&pinned_val.head, &validate_list);
2801
459d0fa7 2802 query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
ae9c0af2 2803 query_val.shared = false;
e2fa3a76
TH
2804 list_add_tail(&query_val.head, &validate_list);
2805
aa35071c
CK
2806 ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
2807 false, NULL);
e2fa3a76
TH
2808 if (unlikely(ret != 0)) {
2809 vmw_execbuf_unpin_panic(dev_priv);
2810 goto out_no_reserve;
2811 }
2812
c0951b79
TH
2813 if (dev_priv->query_cid_valid) {
2814 BUG_ON(fence != NULL);
2815 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2816 if (unlikely(ret != 0)) {
2817 vmw_execbuf_unpin_panic(dev_priv);
2818 goto out_no_emit;
2819 }
2820 dev_priv->query_cid_valid = false;
e2fa3a76
TH
2821 }
2822
459d0fa7
TH
2823 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2824 if (dev_priv->dummy_query_bo_pinned) {
2825 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2826 dev_priv->dummy_query_bo_pinned = false;
2827 }
c0951b79
TH
2828 if (fence == NULL) {
2829 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2830 NULL);
2831 fence = lfence;
2832 }
ecff665f 2833 ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
c0951b79
TH
2834 if (lfence != NULL)
2835 vmw_fence_obj_unreference(&lfence);
e2fa3a76
TH
2836
2837 ttm_bo_unref(&query_val.bo);
2838 ttm_bo_unref(&pinned_val.bo);
459d0fa7
TH
2839 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
2840 DRM_INFO("Dummy query bo pin count: %d\n",
2841 dev_priv->dummy_query_bo->pin_count);
e2fa3a76
TH
2842
2843out_unlock:
e2fa3a76
TH
2844 return;
2845
2846out_no_emit:
ecff665f 2847 ttm_eu_backoff_reservation(&ticket, &validate_list);
e2fa3a76
TH
2848out_no_reserve:
2849 ttm_bo_unref(&query_val.bo);
2850 ttm_bo_unref(&pinned_val.bo);
459d0fa7 2851 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
c0951b79
TH
2852}
2853
2854/**
2855 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2856 * query bo.
2857 *
2858 * @dev_priv: The device private structure.
2859 *
2860 * This function should be used to unpin the pinned query bo, or
2861 * as a query barrier when we need to make sure that all queries have
2862 * finished before the next fifo command. (For example on hardware
2863 * context destructions where the hardware may otherwise leak unfinished
2864 * queries).
2865 *
 2866  * This function does not return any failure codes, but makes attempts
2867 * to do safe unpinning in case of errors.
2868 *
2869 * The function will synchronize on the previous query barrier, and will
2870 * thus not finish until that barrier has executed.
2871 */
2872void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2873{
2874 mutex_lock(&dev_priv->cmdbuf_mutex);
2875 if (dev_priv->query_cid_valid)
2876 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
e2fa3a76
TH
2877 mutex_unlock(&dev_priv->cmdbuf_mutex);
2878}
2879
922ade0d
TH
2880
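/*
 * vmw_execbuf_ioctl - Ioctl entry point for user-space command submission.
 *
 * Checks the argument version, takes the reservation read lock, hands the
 * batch to vmw_execbuf_process() and finally lets KMS update any cursors
 * affected by the submission.
 */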
2881int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2882 struct drm_file *file_priv)
2883{
2884 struct vmw_private *dev_priv = vmw_priv(dev);
2885 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
922ade0d
TH
2886 int ret;
2887
2888 /*
2889 * This will allow us to extend the ioctl argument while
2890 * maintaining backwards compatibility:
2891 * We take different code paths depending on the value of
2892 * arg->version.
2893 */
2894
2895 if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2896 DRM_ERROR("Incorrect execbuf version.\n");
2897 DRM_ERROR("You're running outdated experimental "
 2898 			  "vmwgfx user-space drivers.\n");
2899 return -EINVAL;
2900 }
2901
294adf7d 2902 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
922ade0d
TH
2903 if (unlikely(ret != 0))
2904 return ret;
2905
2906 ret = vmw_execbuf_process(file_priv, dev_priv,
2907 (void __user *)(unsigned long)arg->commands,
2908 NULL, arg->command_size, arg->throttle_us,
bb1bd2f4
JB
2909 (void __user *)(unsigned long)arg->fence_rep,
2910 NULL);
5151adb3 2911 ttm_read_unlock(&dev_priv->reservation_sem);
922ade0d 2912 if (unlikely(ret != 0))
5151adb3 2913 return ret;
922ade0d
TH
2914
2915 vmw_kms_cursor_post_execbuf(dev_priv);
2916
5151adb3 2917 return 0;
fb1d9738 2918}