drm/vmwgfx: Command parser fixes for DX
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4-byte entries, into the command buffer
 * where the id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if and only if guest-backed objects are
 * available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

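/*
 * Illustrative sketch (not built): VMW_CMD_DEF expands to a designated
 * array initializer, so the verifier can index its dispatch table directly
 * by (command id - SVGA_3D_CMD_BASE). The table name below is hypothetical,
 * and the flag values are assumptions rather than the driver's actual
 * policy for this command.
 */
#if 0
static const struct vmw_cmd_entry example_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
};
#endif
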
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context,
					struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

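/*
 * Illustrative sketch (not built): a DX command checker that references a
 * view by id typically funnels the id straight into vmw_view_id_val_add(),
 * which resolves it on the command buffer managed resource manager and
 * pulls both the view and its backing surface onto the validation list.
 * The command body layout below is hypothetical.
 */
#if 0
static int example_check_view_ref(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct example_view_cmd {
		SVGA3dCmdHeader header;
		uint32 view_id;		/* hypothetical field */
	} *cmd = container_of(header, struct example_view_cmd, header);

	/* Returns -EINVAL unless a DX context is set in sw_context. */
	return vmw_view_id_val_add(sw_context, vmw_view_sr, cmd->view_id);
}
#endif
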
/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

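/*
 * Worked example (sketch, not built): relocation offsets are in units of
 * 32-bit words from the start of the command buffer, which is why
 * vmw_cmd_res_reloc_add(), later in this file, records
 * (id_loc - sw_context->buf_start) as the offset. If a resource id field
 * sits at byte 24 of the buffer, the relocation offset is 6, and applying
 * it is simply cb[6] = res->id. A NULL resource instead overwrites the
 * slot with SVGA_3D_CMD_NOP so the device skips the dangling command.
 */
#if 0
	/* Record: id_loc points at the id field inside the buffer copy. */
	vmw_resource_relocation_add(&sw_context->res_relocations, res,
				    id_loc - sw_context->buf_start);
	/* Apply: patch all recorded ids in one pass before submission. */
	vmw_resource_relocations_apply(cb, &sw_context->res_relocations);
#endif
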
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit on the number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

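/*
 * Illustrative sketch (not built): a buffer object may appear on the
 * validation list only once per submission, and only in one role. The
 * second call below fails with -EINVAL ("Inconsistent buffer usage")
 * because the same buffer was first registered as a MOB and then as a
 * GMR buffer.
 */
#if 0
static void example_inconsistent_usage(struct vmw_sw_context *sw_context,
				       struct vmw_dma_buffer *vbo)
{
	uint32_t val_node;

	(void) vmw_bo_to_validate_list(sw_context, vbo, true, &val_node);
	/* Same bo, conflicting validate_as_mob value: returns -EINVAL. */
	(void) vmw_bo_to_validate_list(sw_context, vbo, false, &val_node);
}
#endif
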
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}


/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

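/*
 * Illustrative sketch (not built): a checker for a DX "set shader
 * resources"-style command would hand its embedded view id array to
 * vmw_view_bindings_add(). Everything about the command layout below is
 * hypothetical; only the helper's signature comes from this file.
 */
#if 0
static int example_set_shader_res(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct example_ssr_cmd {
		SVGA3dCmdHeader header;
		uint32 shader_slot;	/* hypothetical */
		uint32 first_slot;	/* hypothetical */
		uint32 view_ids[8];	/* hypothetical */
	} *cmd = container_of(header, struct example_ssr_cmd, header);

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr, cmd->shader_slot,
				     cmd->view_ids, 8, cmd->first_slot);
}
#endif
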
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

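/*
 * Illustrative sketch (not built): the intended calling sequence around a
 * submission that switches query buffers. Both helpers are driven from the
 * execbuf path in this file; the batch submission and fence emission in
 * between are paraphrased.
 */
#if 0
static void example_query_switch(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 struct vmw_dma_buffer *new_query_bo)
{
	/* While parsing an end-query command: */
	if (vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context))
		return;

	/* ... submit the command batch and emit a fence ... */

	/*
	 * After submission: unpin the old pinned query buffer and pin the
	 * new one. Both are covered by the fence emitted above.
	 */
	vmw_query_bo_switch_commit(dev_priv, sw_context);
}
#endif
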
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

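/*
 * Illustrative sketch (not built): how the deferred translation might be
 * consumed once the buffers have been validated and placed. This
 * paraphrases vmw_apply_relocations(), which lives later in this file;
 * only the GMR and MOB placements are shown, and the exact placement
 * handling is an assumption here.
 */
#if 0
	struct vmw_relocation *reloc = &sw_context->relocs[i];
	struct ttm_buffer_object *bo =
		sw_context->val_bufs[reloc->index].base.bo;

	switch (bo->mem.mem_type) {
	case VMW_PL_GMR:
		/* Recorded by vmw_translate_guest_ptr(). */
		reloc->location->gmrId = bo->mem.start;
		break;
	case VMW_PL_MOB:
		/* Recorded by vmw_translate_mob_ptr(). */
		*reloc->mob_loc = bo->mem.start;
		break;
	}
#endif
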
/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

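/*
 * Note on the rewrite pattern above (sketch, not built): the legacy and
 * guest-backed query commands are deliberately the same size, so the
 * BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)) check plus an in-place memcpy is
 * enough to retarget the command stream for guest-backed hardware.
 * Distilled:
 */
#if 0
	gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;	/* new command id */
	gb_cmd.header.size = cmd->header.size;		/* size preserved */
	gb_cmd.q.mobid = cmd->q.guestResult.gmrId;	/* handle is reused */
	gb_cmd.q.offset = cmd->q.guestResult.offset;
	memcpy(cmd, &gb_cmd, sizeof(*cmd));		/* patch in place */
#endif
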
/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

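/*
 * Worked example for the clamp above (sketch, not built): with 4 KiB
 * pages, a 3-page buffer gives bo_size = 12288. For a DMA with
 * guest.ptr.offset == 8192, the remaining room is 12288 - 8192 = 4096
 * bytes, so any suffix->maximumOffset larger than 4096 is clamped to keep
 * the DMA from running past the end of the buffer object.
 */
#if 0
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;	/* 3 * 4096 = 12288 */
	bo_size -= cmd->dma.guest.ptr.offset;		/* 12288 - 8192 = 4096 */
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;	/* clamped to 4096 */
#endif
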
7a73ba74
TH
1506static int vmw_cmd_draw(struct vmw_private *dev_priv,
1507 struct vmw_sw_context *sw_context,
1508 SVGA3dCmdHeader *header)
1509{
1510 struct vmw_draw_cmd {
1511 SVGA3dCmdHeader header;
1512 SVGA3dCmdDrawPrimitives body;
1513 } *cmd;
1514 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1515 (unsigned long)header + sizeof(*cmd));
1516 SVGA3dPrimitiveRange *range;
1517 uint32_t i;
1518 uint32_t maxnum;
1519 int ret;
1520
1521 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1522 if (unlikely(ret != 0))
1523 return ret;
1524
1525 cmd = container_of(header, struct vmw_draw_cmd, header);
1526 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1527
1528 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1529 DRM_ERROR("Illegal number of vertex declarations.\n");
1530 return -EINVAL;
1531 }
1532
1533 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
c0951b79
TH
1534 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1535 user_surface_converter,
1536 &decl->array.surfaceId, NULL);
7a73ba74
TH
1537 if (unlikely(ret != 0))
1538 return ret;
1539 }
1540
1541 maxnum = (header->size - sizeof(cmd->body) -
1542 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1543 if (unlikely(cmd->body.numRanges > maxnum)) {
1544 DRM_ERROR("Illegal number of index ranges.\n");
1545 return -EINVAL;
1546 }
1547
1548 range = (SVGA3dPrimitiveRange *) decl;
1549 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
c0951b79
TH
1550 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1551 user_surface_converter,
1552 &range->indexArray.surfaceId, NULL);
7a73ba74
TH
1553 if (unlikely(ret != 0))
1554 return ret;
1555 }
1556 return 0;
1557}
1558
1559
1560static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1561 struct vmw_sw_context *sw_context,
1562 SVGA3dCmdHeader *header)
1563{
1564 struct vmw_tex_state_cmd {
1565 SVGA3dCmdHeader header;
1566 SVGA3dCmdSetTextureState state;
b5c3b1a6 1567 } *cmd;
7a73ba74
TH
1568
1569 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1570 ((unsigned long) header + header->size + sizeof(header));
1571 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1572 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
b5c3b1a6 1573 struct vmw_resource_val_node *ctx_node;
173fb7d4 1574 struct vmw_resource_val_node *res_node;
7a73ba74
TH
1575 int ret;
1576
b5c3b1a6
TH
1577 cmd = container_of(header, struct vmw_tex_state_cmd,
1578 header);
1579
1580 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1581 user_context_converter, &cmd->state.cid,
1582 &ctx_node);
7a73ba74
TH
1583 if (unlikely(ret != 0))
1584 return ret;
1585
1586 for (; cur_state < last_state; ++cur_state) {
1587 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1588 continue;
1589
d80efd5c
TH
1590 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1591 DRM_ERROR("Illegal texture/sampler unit %u.\n",
1592 (unsigned) cur_state->stage);
1593 return -EINVAL;
1594 }
1595
c0951b79
TH
1596 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1597 user_surface_converter,
173fb7d4 1598 &cur_state->value, &res_node);
7a73ba74
TH
1599 if (unlikely(ret != 0))
1600 return ret;
b5c3b1a6
TH
1601
1602 if (dev_priv->has_mob) {
d80efd5c
TH
1603 struct vmw_ctx_bindinfo_tex binding;
1604
1605 binding.bi.ctx = ctx_node->res;
1606 binding.bi.res = res_node ? res_node->res : NULL;
1607 binding.bi.bt = vmw_ctx_binding_tex;
1608 binding.texture_stage = cur_state->stage;
1609 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1610 0, binding.texture_stage);
b5c3b1a6 1611 }
7a73ba74
TH
1612 }
1613
1614 return 0;
1615}
1616
4084fb89
JB
1617static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1618 struct vmw_sw_context *sw_context,
1619 void *buf)
1620{
1621 struct vmw_dma_buffer *vmw_bo;
1622 int ret;
1623
1624 struct {
1625 uint32_t header;
1626 SVGAFifoCmdDefineGMRFB body;
1627 } *cmd = buf;
1628
1629 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1630 &cmd->body.ptr,
1631 &vmw_bo);
1632 if (unlikely(ret != 0))
1633 return ret;
1634
1635 vmw_dmabuf_unreference(&vmw_bo);
1636
1637 return ret;
1638}
1639
d80efd5c
TH
1640
1641/**
1642 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1643 * switching
1644 *
1645 * @dev_priv: Pointer to a device private struct.
1646 * @sw_context: The software context being used for this batch.
1647 * @val_node: The validation node representing the resource.
1648 * @buf_id: Pointer to the user-space backup buffer handle in the command
1649 * stream.
1650 * @backup_offset: Offset of backup into MOB.
1651 *
1652 * This function prepares for registering a switch of backup buffers
1653 * in the resource metadata just prior to unreserving. It's basically a wrapper
1654 * around vmw_cmd_res_switch_backup with a different interface.
1655 */
1656static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1657 struct vmw_sw_context *sw_context,
1658 struct vmw_resource_val_node *val_node,
1659 uint32_t *buf_id,
1660 unsigned long backup_offset)
1661{
1662 struct vmw_dma_buffer *dma_buf;
1663 int ret;
1664
1665 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1666 if (ret)
1667 return ret;
1668
1669 val_node->switching_backup = true;
1670 if (val_node->first_usage)
1671 val_node->no_buffer_needed = true;
1672
1673 vmw_dmabuf_unreference(&val_node->new_backup);
1674 val_node->new_backup = dma_buf;
1675 val_node->new_backup_offset = backup_offset;
1676
1677 return 0;
1678}
1679
1680
a97e2192
TH
1681/**
1682 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1683 *
1684 * @dev_priv: Pointer to a device private struct.
1685 * @sw_context: The software context being used for this batch.
1686 * @res_type: The resource type.
1687 * @converter: Information about user-space binding for this resource type.
1688 * @res_id: Pointer to the user-space resource handle in the command stream.
1689 * @buf_id: Pointer to the user-space backup buffer handle in the command
1690 * stream.
1691 * @backup_offset: Offset of backup into MOB.
1692 *
1693 * This function prepares for registering a switch of backup buffers
d80efd5c
TH
1694 * in the resource metadata just prior to unreserving. It's basically a wrapper
1695 * around vmw_cmd_res_switch_backup with a different interface.
a97e2192
TH
1696 */
1697static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1698 struct vmw_sw_context *sw_context,
1699 enum vmw_res_type res_type,
1700 const struct vmw_user_resource_conv
1701 *converter,
1702 uint32_t *res_id,
1703 uint32_t *buf_id,
1704 unsigned long backup_offset)
1705{
a97e2192 1706 struct vmw_resource_val_node *val_node;
d80efd5c 1707 int ret;
a97e2192
TH
1708
1709 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1710 converter, res_id, &val_node);
d80efd5c 1711 if (ret)
a97e2192
TH
1712 return ret;
1713
d80efd5c
TH
1714 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1715 buf_id, backup_offset);
1716}
1717
1718/**
1719 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1720 * command
1721 *
1722 * @dev_priv: Pointer to a device private struct.
1723 * @sw_context: The software context being used for this batch.
1724 * @header: Pointer to the command header in the command stream.
1725 */
1726static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1727 struct vmw_sw_context *sw_context,
1728 SVGA3dCmdHeader *header)
1729{
1730 struct vmw_bind_gb_surface_cmd {
1731 SVGA3dCmdHeader header;
1732 SVGA3dCmdBindGBSurface body;
1733 } *cmd;
1734
1735 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1736
1737 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1738 user_surface_converter,
1739 &cmd->body.sid, &cmd->body.mobid,
1740 0);
1741}
1742
1743/**
1744 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1745 * command
1746 *
1747 * @dev_priv: Pointer to a device private struct.
1748 * @sw_context: The software context being used for this batch.
1749 * @header: Pointer to the command header in the command stream.
1750 */
1751static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1752 struct vmw_sw_context *sw_context,
1753 SVGA3dCmdHeader *header)
1754{
1755 struct vmw_gb_surface_cmd {
1756 SVGA3dCmdHeader header;
1757 SVGA3dCmdUpdateGBImage body;
1758 } *cmd;
1759
1760 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1761
1762 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1763 user_surface_converter,
1764 &cmd->body.image.sid, NULL);
1765}
1766
1767/**
1768 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1769 * command
1770 *
1771 * @dev_priv: Pointer to a device private struct.
1772 * @sw_context: The software context being used for this batch.
1773 * @header: Pointer to the command header in the command stream.
1774 */
1775static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1776 struct vmw_sw_context *sw_context,
1777 SVGA3dCmdHeader *header)
1778{
1779 struct vmw_gb_surface_cmd {
1780 SVGA3dCmdHeader header;
1781 SVGA3dCmdUpdateGBSurface body;
1782 } *cmd;
1783
1784 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1785
1786 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1787 user_surface_converter,
1788 &cmd->body.sid, NULL);
1789}
1790
1791/**
1792 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1793 * command
1794 *
1795 * @dev_priv: Pointer to a device private struct.
1796 * @sw_context: The software context being used for this batch.
1797 * @header: Pointer to the command header in the command stream.
1798 */
1799static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1800 struct vmw_sw_context *sw_context,
1801 SVGA3dCmdHeader *header)
1802{
1803 struct vmw_gb_surface_cmd {
1804 SVGA3dCmdHeader header;
1805 SVGA3dCmdReadbackGBImage body;
1806 } *cmd;
1807
1808 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1809
1810 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1811 user_surface_converter,
1812 &cmd->body.image.sid, NULL);
1813}
1814
1815/**
1816 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1817 * command
1818 *
1819 * @dev_priv: Pointer to a device private struct.
1820 * @sw_context: The software context being used for this batch.
1821 * @header: Pointer to the command header in the command stream.
1822 */
1823static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1824 struct vmw_sw_context *sw_context,
1825 SVGA3dCmdHeader *header)
1826{
1827 struct vmw_gb_surface_cmd {
1828 SVGA3dCmdHeader header;
1829 SVGA3dCmdReadbackGBSurface body;
1830 } *cmd;
1831
1832 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1833
1834 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1835 user_surface_converter,
1836 &cmd->body.sid, NULL);
1837}
1838
1839/**
1840 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1841 * command
1842 *
1843 * @dev_priv: Pointer to a device private struct.
1844 * @sw_context: The software context being used for this batch.
1845 * @header: Pointer to the command header in the command stream.
1846 */
1847static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1848 struct vmw_sw_context *sw_context,
1849 SVGA3dCmdHeader *header)
1850{
1851 struct vmw_gb_surface_cmd {
1852 SVGA3dCmdHeader header;
1853 SVGA3dCmdInvalidateGBImage body;
1854 } *cmd;
1855
1856 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1857
1858 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1859 user_surface_converter,
1860 &cmd->body.image.sid, NULL);
1861}
1862
1863/**
1864 * vmw_cmd_invalidate_gb_surface - Validate an
1865 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1866 *
1867 * @dev_priv: Pointer to a device private struct.
1868 * @sw_context: The software context being used for this batch.
1869 * @header: Pointer to the command header in the command stream.
1870 */
1871static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1872 struct vmw_sw_context *sw_context,
1873 SVGA3dCmdHeader *header)
1874{
1875 struct vmw_gb_surface_cmd {
1876 SVGA3dCmdHeader header;
1877 SVGA3dCmdInvalidateGBSurface body;
1878 } *cmd;
1879
1880 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1881
1882 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1883 user_surface_converter,
1884 &cmd->body.sid, NULL);
1885}
1886
1887
1888/**
1889 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1890 * command
1891 *
1892 * @dev_priv: Pointer to a device private struct.
1893 * @sw_context: The software context being used for this batch.
1894 * @header: Pointer to the command header in the command stream.
1895 */
1896static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1897 struct vmw_sw_context *sw_context,
1898 SVGA3dCmdHeader *header)
1899{
1900 struct vmw_shader_define_cmd {
1901 SVGA3dCmdHeader header;
1902 SVGA3dCmdDefineShader body;
1903 } *cmd;
1904 int ret;
1905 size_t size;
1906 struct vmw_resource_val_node *val;
1907
1908 cmd = container_of(header, struct vmw_shader_define_cmd,
1909 header);
1910
1911 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1912 user_context_converter, &cmd->body.cid,
1913 &val);
1914 if (unlikely(ret != 0))
1915 return ret;
1916
1917 if (unlikely(!dev_priv->has_mob))
1918 return 0;
1919
1920 size = cmd->header.size - sizeof(cmd->body);
1921 ret = vmw_compat_shader_add(dev_priv,
1922 vmw_context_res_man(val->res),
1923 cmd->body.shid, cmd + 1,
1924 cmd->body.type, size,
1925 &sw_context->staged_cmd_res);
1926 if (unlikely(ret != 0))
1927 return ret;
1928
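	/*
	 * Register a relocation with a NULL resource: when relocations are
	 * applied this patches the command id to a device NOP, since the
	 * define is now handled on the kernel side (an assumption based on
	 * how NULL-resource relocations are applied by this parser).
	 */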
1929 return vmw_resource_relocation_add(&sw_context->res_relocations,
1930 NULL, &cmd->header.id -
1931 sw_context->buf_start);
1934}
1935
1936/**
1937 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1938 * command
1939 *
1940 * @dev_priv: Pointer to a device private struct.
1941 * @sw_context: The software context being used for this batch.
1942 * @header: Pointer to the command header in the command stream.
1943 */
1944static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1945 struct vmw_sw_context *sw_context,
1946 SVGA3dCmdHeader *header)
1947{
1948 struct vmw_shader_destroy_cmd {
1949 SVGA3dCmdHeader header;
1950 SVGA3dCmdDestroyShader body;
1951 } *cmd;
1952 int ret;
1953 struct vmw_resource_val_node *val;
1954
1955 cmd = container_of(header, struct vmw_shader_destroy_cmd,
1956 header);
1957
1958 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1959 user_context_converter, &cmd->body.cid,
1960 &val);
1961 if (unlikely(ret != 0))
1962 return ret;
1963
1964 if (unlikely(!dev_priv->has_mob))
1965 return 0;
1966
1967 ret = vmw_shader_remove(vmw_context_res_man(val->res),
1968 cmd->body.shid,
1969 cmd->body.type,
1970 &sw_context->staged_cmd_res);
1971 if (unlikely(ret != 0))
1972 return ret;
1973
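	/*
	 * As in vmw_cmd_shader_define(): a NULL-resource relocation patches
	 * the command away, since the removal was already handled above.
	 */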
1974 return vmw_resource_relocation_add(&sw_context->res_relocations,
1975 NULL, &cmd->header.id -
1976 sw_context->buf_start);
1979}
1980
1981/**
1982 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1983 * command
1984 *
1985 * @dev_priv: Pointer to a device private struct.
1986 * @sw_context: The software context being used for this batch.
1987 * @header: Pointer to the command header in the command stream.
1988 */
1989static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1990 struct vmw_sw_context *sw_context,
1991 SVGA3dCmdHeader *header)
1992{
1993 struct vmw_set_shader_cmd {
1994 SVGA3dCmdHeader header;
1995 SVGA3dCmdSetShader body;
1996 } *cmd;
1997 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1998 struct vmw_ctx_bindinfo_shader binding;
1999 struct vmw_resource *res = NULL;
2000 int ret;
2001
2002 cmd = container_of(header, struct vmw_set_shader_cmd,
2003 header);
2004
2005 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2006 DRM_ERROR("Illegal shader type %u.\n",
2007 (unsigned) cmd->body.type);
2008 return -EINVAL;
2009 }
2010
2011 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2012 user_context_converter, &cmd->body.cid,
2013 &ctx_node);
2014 if (unlikely(ret != 0))
2015 return ret;
2016
2017 if (!dev_priv->has_mob)
2018 return 0;
2019
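	/*
	 * Prefer a guest-backed shader attached to the context's resource
	 * manager; fall back to a legacy user-space shader resource when
	 * that lookup fails.
	 */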
2020 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2021 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2022 cmd->body.shid,
2023 cmd->body.type);
2024
2025 if (!IS_ERR(res)) {
2026 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2027 &cmd->body.shid, res,
2028 &res_node);
2029 vmw_resource_unreference(&res);
2030 if (unlikely(ret != 0))
2031 return ret;
2032 }
2033 }
2034
2035 if (!res_node) {
2036 ret = vmw_cmd_res_check(dev_priv, sw_context,
2037 vmw_res_shader,
2038 user_shader_converter,
2039 &cmd->body.shid, &res_node);
2040 if (unlikely(ret != 0))
2041 return ret;
2042 }
2043
2044 binding.bi.ctx = ctx_node->res;
2045 binding.bi.res = res_node ? res_node->res : NULL;
2046 binding.bi.bt = vmw_ctx_binding_shader;
2047 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2048 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2049 binding.shader_slot, 0);
2050 return 0;
2051}
2052
2053/**
2054 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2055 * command
2056 *
2057 * @dev_priv: Pointer to a device private struct.
2058 * @sw_context: The software context being used for this batch.
2059 * @header: Pointer to the command header in the command stream.
2060 */
2061static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2062 struct vmw_sw_context *sw_context,
2063 SVGA3dCmdHeader *header)
2064{
2065 struct vmw_set_shader_const_cmd {
2066 SVGA3dCmdHeader header;
2067 SVGA3dCmdSetShaderConst body;
2068 } *cmd;
2069 int ret;
2070
2071 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2072 header);
2073
2074 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2075 user_context_converter, &cmd->body.cid,
2076 NULL);
2077 if (unlikely(ret != 0))
2078 return ret;
2079
2080 if (dev_priv->has_mob)
2081 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2082
2083 return 0;
2084}
2085
2086/**
2087 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2088 * command
2089 *
2090 * @dev_priv: Pointer to a device private struct.
2091 * @sw_context: The software context being used for this batch.
2092 * @header: Pointer to the command header in the command stream.
2093 */
2094static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2095 struct vmw_sw_context *sw_context,
2096 SVGA3dCmdHeader *header)
2097{
2098 struct vmw_bind_gb_shader_cmd {
2099 SVGA3dCmdHeader header;
2100 SVGA3dCmdBindGBShader body;
2101 } *cmd;
2102
2103 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2104 header);
2105
2106 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2107 user_shader_converter,
2108 &cmd->body.shid, &cmd->body.mobid,
2109 cmd->body.offsetInBytes);
2110}
2111
2112/**
2113 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2114 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2115 *
2116 * @dev_priv: Pointer to a device private struct.
2117 * @sw_context: The software context being used for this batch.
2118 * @header: Pointer to the command header in the command stream.
2119 */
2120static int
2121vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2122 struct vmw_sw_context *sw_context,
2123 SVGA3dCmdHeader *header)
2124{
2125 struct {
2126 SVGA3dCmdHeader header;
2127 SVGA3dCmdDXSetSingleConstantBuffer body;
2128 } *cmd;
2129 struct vmw_resource_val_node *res_node = NULL;
2130 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2131 struct vmw_ctx_bindinfo_cb binding;
2132 int ret;
2133
2134 if (unlikely(ctx_node == NULL)) {
2135 DRM_ERROR("DX Context not set.\n");
2136 return -EINVAL;
2137 }
2138
2139 cmd = container_of(header, typeof(*cmd), header);
2140 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2141 user_surface_converter,
2142 &cmd->body.sid, &res_node);
2143 if (unlikely(ret != 0))
2144 return ret;
2145
2146 binding.bi.ctx = ctx_node->res;
2147 binding.bi.res = res_node ? res_node->res : NULL;
2148 binding.bi.bt = vmw_ctx_binding_cb;
2149 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2150 binding.offset = cmd->body.offsetInBytes;
2151 binding.size = cmd->body.sizeInBytes;
2152 binding.slot = cmd->body.slot;
2153
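	/* Reject bindings outside the DX10 shader type and slot ranges. */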
2154 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2155 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2156 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2157 (unsigned) cmd->body.type,
2158 (unsigned) binding.slot);
2159 return -EINVAL;
2160 }
2161
2162 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2163 binding.shader_slot, binding.slot);
2164
2165 return 0;
2166}
2167
2168/**
2169 * vmw_cmd_dx_set_shader_res - Validate an
2170 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2171 *
2172 * @dev_priv: Pointer to a device private struct.
2173 * @sw_context: The software context being used for this batch.
2174 * @header: Pointer to the command header in the command stream.
2175 */
2176static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2177 struct vmw_sw_context *sw_context,
2178 SVGA3dCmdHeader *header)
2179{
2180 struct {
2181 SVGA3dCmdHeader header;
2182 SVGA3dCmdDXSetShaderResources body;
2183 } *cmd = container_of(header, typeof(*cmd), header);
2184 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2185 sizeof(SVGA3dShaderResourceViewId);
2186
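	/*
	 * Compare in 64 bits so that a large startView cannot wrap the
	 * 32-bit sum around SVGA3D_DX_MAX_SRVIEWS.
	 */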
2187 if ((u64) cmd->body.startView + (u64) num_sr_view >
2188 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2189 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2190 DRM_ERROR("Invalid shader binding.\n");
2191 return -EINVAL;
2192 }
2193
2194 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2195 vmw_ctx_binding_sr,
2196 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2197 (void *) &cmd[1], num_sr_view,
2198 cmd->body.startView);
2199}
2200
2201/**
2202 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2203 * command
2204 *
2205 * @dev_priv: Pointer to a device private struct.
2206 * @sw_context: The software context being used for this batch.
2207 * @header: Pointer to the command header in the command stream.
2208 */
2209static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2210 struct vmw_sw_context *sw_context,
2211 SVGA3dCmdHeader *header)
2212{
2213 struct {
2214 SVGA3dCmdHeader header;
2215 SVGA3dCmdDXSetShader body;
2216 } *cmd;
2217 struct vmw_resource *res = NULL;
2218 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2219 struct vmw_ctx_bindinfo_shader binding;
2220 int ret = 0;
2221
2222 if (unlikely(ctx_node == NULL)) {
2223 DRM_ERROR("DX Context not set.\n");
2224 return -EINVAL;
2225 }
2226
2227 cmd = container_of(header, typeof(*cmd), header);
2228
2229 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2230 DRM_ERROR("Illegal shader type %u.\n",
2231 (unsigned) cmd->body.type);
2232 return -EINVAL;
2233 }
2234
2235 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2236 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2237 if (IS_ERR(res)) {
2238 DRM_ERROR("Could not find shader for binding.\n");
2239 return PTR_ERR(res);
2240 }
2241
2242 ret = vmw_resource_val_add(sw_context, res, NULL);
2243 if (ret)
2244 goto out_unref;
2245 }
2246
2247 binding.bi.ctx = ctx_node->res;
2248 binding.bi.res = res;
2249 binding.bi.bt = vmw_ctx_binding_dx_shader;
2250 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2251
2252 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2253 binding.shader_slot, 0);
2254out_unref:
2255 if (res)
2256 vmw_resource_unreference(&res);
2257
2258 return ret;
2259}
2260
2261/**
2262 * vmw_cmd_dx_set_vertex_buffers - Validates an
2263 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2264 *
2265 * @dev_priv: Pointer to a device private struct.
2266 * @sw_context: The software context being used for this batch.
2267 * @header: Pointer to the command header in the command stream.
2268 */
2269static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2270 struct vmw_sw_context *sw_context,
2271 SVGA3dCmdHeader *header)
2272{
2273 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2274 struct vmw_ctx_bindinfo_vb binding;
2275 struct vmw_resource_val_node *res_node;
2276 struct {
2277 SVGA3dCmdHeader header;
2278 SVGA3dCmdDXSetVertexBuffers body;
2279 SVGA3dVertexBuffer buf[];
2280 } *cmd;
2281 int i, ret, num;
2282
2283 if (unlikely(ctx_node == NULL)) {
2284 DRM_ERROR("DX Context not set.\n");
2285 return -EINVAL;
2286 }
2287
2288 cmd = container_of(header, typeof(*cmd), header);
2289 num = (cmd->header.size - sizeof(cmd->body)) /
2290 sizeof(SVGA3dVertexBuffer);
2291 if ((u64)num + (u64)cmd->body.startBuffer >
2292 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2293 DRM_ERROR("Invalid number of vertex buffers.\n");
2294 return -EINVAL;
2295 }
2296
2297 for (i = 0; i < num; i++) {
2298 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2299 user_surface_converter,
2300 &cmd->buf[i].sid, &res_node);
2301 if (unlikely(ret != 0))
2302 return ret;
2303
2304 binding.bi.ctx = ctx_node->res;
2305 binding.bi.bt = vmw_ctx_binding_vb;
2306 binding.bi.res = ((res_node) ? res_node->res : NULL);
2307 binding.offset = cmd->buf[i].offset;
2308 binding.stride = cmd->buf[i].stride;
2309 binding.slot = i + cmd->body.startBuffer;
2310
2311 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2312 0, binding.slot);
2313 }
2314
2315 return 0;
2316}
2317
2318/**
2319 * vmw_cmd_dx_set_index_buffer - Validate an
2320 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2321 *
2322 * @dev_priv: Pointer to a device private struct.
2323 * @sw_context: The software context being used for this batch.
2324 * @header: Pointer to the command header in the command stream.
2325 */
2326static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2327 struct vmw_sw_context *sw_context,
2328 SVGA3dCmdHeader *header)
2329{
2330 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2331 struct vmw_ctx_bindinfo_ib binding;
2332 struct vmw_resource_val_node *res_node;
2333 struct {
2334 SVGA3dCmdHeader header;
2335 SVGA3dCmdDXSetIndexBuffer body;
2336 } *cmd;
2337 int ret;
2338
2339 if (unlikely(ctx_node == NULL)) {
2340 DRM_ERROR("DX Context not set.\n");
2341 return -EINVAL;
2342 }
2343
2344 cmd = container_of(header, typeof(*cmd), header);
2345 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2346 user_surface_converter,
2347 &cmd->body.sid, &res_node);
2348 if (unlikely(ret != 0))
2349 return ret;
2350
2351 binding.bi.ctx = ctx_node->res;
2352 binding.bi.res = ((res_node) ? res_node->res : NULL);
2353 binding.bi.bt = vmw_ctx_binding_ib;
2354 binding.offset = cmd->body.offset;
2355 binding.format = cmd->body.format;
2356
2357 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2358
2359 return 0;
2360}
2361
2362/**
2363 * vmw_cmd_dx_set_rendertargets - Validate an
2364 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2365 *
2366 * @dev_priv: Pointer to a device private struct.
2367 * @sw_context: The software context being used for this batch.
2368 * @header: Pointer to the command header in the command stream.
2369 */
2370static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2371 struct vmw_sw_context *sw_context,
2372 SVGA3dCmdHeader *header)
2373{
2374 struct {
2375 SVGA3dCmdHeader header;
2376 SVGA3dCmdDXSetRenderTargets body;
2377 } *cmd = container_of(header, typeof(*cmd), header);
2378 int ret;
2379 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2380 sizeof(SVGA3dRenderTargetViewId);
2381
2382 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2383 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2384 return -EINVAL;
2385 }
2386
2387 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2388 vmw_ctx_binding_ds, 0,
2389 &cmd->body.depthStencilViewId, 1, 0);
2390 if (ret)
2391 return ret;
2392
2393 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2394 vmw_ctx_binding_dx_rt, 0,
2395 (void *)&cmd[1], num_rt_view, 0);
2396}
2397
2398/**
2399 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2400 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2401 *
2402 * @dev_priv: Pointer to a device private struct.
2403 * @sw_context: The software context being used for this batch.
2404 * @header: Pointer to the command header in the command stream.
2405 */
2406static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2407 struct vmw_sw_context *sw_context,
2408 SVGA3dCmdHeader *header)
2409{
2410 struct {
2411 SVGA3dCmdHeader header;
2412 SVGA3dCmdDXClearRenderTargetView body;
2413 } *cmd = container_of(header, typeof(*cmd), header);
2414
2415 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2416 cmd->body.renderTargetViewId);
2417}
2418
2419/**
2420 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2421 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2422 *
2423 * @dev_priv: Pointer to a device private struct.
2424 * @sw_context: The software context being used for this batch.
2425 * @header: Pointer to the command header in the command stream.
2426 */
2427static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2428 struct vmw_sw_context *sw_context,
2429 SVGA3dCmdHeader *header)
2430{
2431 struct {
2432 SVGA3dCmdHeader header;
2433 SVGA3dCmdDXClearDepthStencilView body;
2434 } *cmd = container_of(header, typeof(*cmd), header);
2435
2436 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2437 cmd->body.depthStencilViewId);
2438}
2439
2440static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2441 struct vmw_sw_context *sw_context,
2442 SVGA3dCmdHeader *header)
2443{
2444 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2445 struct vmw_resource_val_node *srf_node;
2446 struct vmw_resource *res;
2447 enum vmw_view_type view_type;
2448 int ret;
2449 /*
2450 * This is based on the fact that all affected define commands have
2451 * the same initial command body layout.
2452 */
2453 struct {
2454 SVGA3dCmdHeader header;
2455 uint32 defined_id;
2456 uint32 sid;
2457 } *cmd;
2458
2459 if (unlikely(ctx_node == NULL)) {
2460 DRM_ERROR("DX Context not set.\n");
2461 return -EINVAL;
2462 }
2463
2464 view_type = vmw_view_cmd_to_type(header->id);
2465 cmd = container_of(header, typeof(*cmd), header);
2466 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2467 user_surface_converter,
2468 &cmd->sid, &srf_node);
2469 if (unlikely(ret != 0))
2470 return ret;
2471
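	/*
	 * Notify the context's view cotable of the new id so that it can
	 * be grown before the define command reaches the device.
	 */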
2472 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2473 ret = vmw_cotable_notify(res, cmd->defined_id);
2474 vmw_resource_unreference(&res);
2475 if (unlikely(ret != 0))
2476 return ret;
2477
2478 return vmw_view_add(sw_context->man,
2479 ctx_node->res,
2480 srf_node->res,
2481 view_type,
2482 cmd->defined_id,
2483 header,
2484 header->size + sizeof(*header),
2485 &sw_context->staged_cmd_res);
2486}
2487
2488/**
2489 * vmw_cmd_dx_set_so_targets - Validate an
2490 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2491 *
2492 * @dev_priv: Pointer to a device private struct.
2493 * @sw_context: The software context being used for this batch.
2494 * @header: Pointer to the command header in the command stream.
2495 */
2496static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2497 struct vmw_sw_context *sw_context,
2498 SVGA3dCmdHeader *header)
2499{
2500 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2501 struct vmw_ctx_bindinfo_so binding;
2502 struct vmw_resource_val_node *res_node;
2503 struct {
2504 SVGA3dCmdHeader header;
2505 SVGA3dCmdDXSetSOTargets body;
2506 SVGA3dSoTarget targets[];
2507 } *cmd;
2508 int i, ret, num;
2509
2510 if (unlikely(ctx_node == NULL)) {
2511 DRM_ERROR("DX Context not set.\n");
2512 return -EINVAL;
2513 }
2514
2515 cmd = container_of(header, typeof(*cmd), header);
2516 num = (cmd->header.size - sizeof(cmd->body)) /
2517 sizeof(SVGA3dSoTarget);
2518
2519 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2520 DRM_ERROR("Invalid DX SO binding.\n");
2521 return -EINVAL;
2522 }
2523
2524 for (i = 0; i < num; i++) {
2525 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2526 user_surface_converter,
2527 &cmd->targets[i].sid, &res_node);
2528 if (unlikely(ret != 0))
2529 return ret;
2530
2531 binding.bi.ctx = ctx_node->res;
2532 binding.bi.res = ((res_node) ? res_node->res : NULL);
2533 binding.bi.bt = vmw_ctx_binding_so;
2534 binding.offset = cmd->targets[i].offset;
2535 binding.size = cmd->targets[i].sizeInBytes;
2536 binding.slot = i;
2537
2538 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2539 0, binding.slot);
2540 }
2541
2542 return 0;
2543}
2544
2545static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2546 struct vmw_sw_context *sw_context,
2547 SVGA3dCmdHeader *header)
2548{
2549 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2550 struct vmw_resource *res;
2551 /*
2552 * This is based on the fact that all affected define commands have
2553 * the same initial command body layout.
2554 */
2555 struct {
2556 SVGA3dCmdHeader header;
2557 uint32 defined_id;
2558 } *cmd;
2559 enum vmw_so_type so_type;
2560 int ret;
2561
2562 if (unlikely(ctx_node == NULL)) {
2563 DRM_ERROR("DX Context not set.\n");
2564 return -EINVAL;
2565 }
2566
2567 so_type = vmw_so_cmd_to_type(header->id);
2568 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2569 cmd = container_of(header, typeof(*cmd), header);
2570 ret = vmw_cotable_notify(res, cmd->defined_id);
2571 vmw_resource_unreference(&res);
2572
2573 return ret;
2574}
2575
2576/**
2577 * vmw_cmd_dx_check_subresource - Validate an
2578 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2579 *
2580 * @dev_priv: Pointer to a device private struct.
2581 * @sw_context: The software context being used for this batch.
2582 * @header: Pointer to the command header in the command stream.
2583 */
2584static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2585 struct vmw_sw_context *sw_context,
2586 SVGA3dCmdHeader *header)
2587{
2588 struct {
2589 SVGA3dCmdHeader header;
2590 union {
2591 SVGA3dCmdDXReadbackSubResource r_body;
2592 SVGA3dCmdDXInvalidateSubResource i_body;
2593 SVGA3dCmdDXUpdateSubResource u_body;
2594 SVGA3dSurfaceId sid;
2595 };
2596 } *cmd;
2597
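	/*
	 * All three command bodies start with the surface id, so a single
	 * sid lookup below serves each variant; the BUILD_BUG_ONs verify
	 * that layout assumption at compile time.
	 */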
2598 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2599 offsetof(typeof(*cmd), sid));
2600 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2601 offsetof(typeof(*cmd), sid));
2602 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2603 offsetof(typeof(*cmd), sid));
2604
2605 cmd = container_of(header, typeof(*cmd), header);
2606
2607 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2608 user_surface_converter,
2609 &cmd->sid, NULL);
2610}
2611
2612static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2613 struct vmw_sw_context *sw_context,
2614 SVGA3dCmdHeader *header)
2615{
2616 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2617
2618 if (unlikely(ctx_node == NULL)) {
2619 DRM_ERROR("DX Context not set.\n");
2620 return -EINVAL;
2621 }
2622
2623 return 0;
2624}
2625
2626/**
2627 * vmw_cmd_dx_view_remove - validate a view remove command and
2628 * schedule the view resource for removal.
2629 *
2630 * @dev_priv: Pointer to a device private struct.
2631 * @sw_context: The software context being used for this batch.
2632 * @header: Pointer to the command header in the command stream.
2633 *
2634 * Check that the view exists, and if it was not created using this
2635 * command batch, make sure it's validated (present in the device) so that
2636 * the remove command will not confuse the device.
2637 */
2638static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2639 struct vmw_sw_context *sw_context,
2640 SVGA3dCmdHeader *header)
2641{
2642 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2643 struct {
2644 SVGA3dCmdHeader header;
2645 union vmw_view_destroy body;
2646 } *cmd = container_of(header, typeof(*cmd), header);
2647 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2648 struct vmw_resource *view;
2649 int ret;
2650
2651 if (!ctx_node) {
2652 DRM_ERROR("DX Context not set.\n");
2653 return -EINVAL;
2654 }
2655
2656 ret = vmw_view_remove(sw_context->man,
2657 cmd->body.view_id, view_type,
2658 &sw_context->staged_cmd_res,
2659 &view);
2660 if (ret || !view)
2661 return ret;
2662
2663 /*
2664 * Add view to the validate list iff it was not created using this
2665 * command batch.
2666 */
2667 return vmw_view_res_val_add(sw_context, view);
2668}
2669
2670/**
2671 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2672 * command
2673 *
2674 * @dev_priv: Pointer to a device private struct.
2675 * @sw_context: The software context being used for this batch.
2676 * @header: Pointer to the command header in the command stream.
2677 */
2678static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2679 struct vmw_sw_context *sw_context,
2680 SVGA3dCmdHeader *header)
2681{
2682 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2683 struct vmw_resource *res;
2684 struct {
2685 SVGA3dCmdHeader header;
2686 SVGA3dCmdDXDefineShader body;
2687 } *cmd = container_of(header, typeof(*cmd), header);
2688 int ret;
2689
2690 if (!ctx_node) {
2691 DRM_ERROR("DX Context not set.\n");
2692 return -EINVAL;
2693 }
2694
2695 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2696 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2697 vmw_resource_unreference(&res);
2698 if (ret)
2699 return ret;
2700
2701 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2702 cmd->body.shaderId, cmd->body.type,
2703 &sw_context->staged_cmd_res);
2704}
2705
2706/**
2707 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2708 * command
2709 *
2710 * @dev_priv: Pointer to a device private struct.
2711 * @sw_context: The software context being used for this batch.
2712 * @header: Pointer to the command header in the command stream.
2713 */
2714static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2715 struct vmw_sw_context *sw_context,
2716 SVGA3dCmdHeader *header)
2717{
2718 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2719 struct {
2720 SVGA3dCmdHeader header;
2721 SVGA3dCmdDXDestroyShader body;
2722 } *cmd = container_of(header, typeof(*cmd), header);
2723 int ret;
2724
2725 if (!ctx_node) {
2726 DRM_ERROR("DX Context not set.\n");
2727 return -EINVAL;
2728 }
2729
2730 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2731 &sw_context->staged_cmd_res);
2732 if (ret)
2733 DRM_ERROR("Could not find shader to remove.\n");
2734
2735 return ret;
2736}
2737
2738/**
2739 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2740 * command
2741 *
2742 * @dev_priv: Pointer to a device private struct.
2743 * @sw_context: The software context being used for this batch.
2744 * @header: Pointer to the command header in the command stream.
2745 */
2746static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2747 struct vmw_sw_context *sw_context,
2748 SVGA3dCmdHeader *header)
2749{
2750 struct vmw_resource_val_node *ctx_node;
2751 struct vmw_resource_val_node *res_node;
2752 struct vmw_resource *res;
2753 struct {
2754 SVGA3dCmdHeader header;
2755 SVGA3dCmdDXBindShader body;
2756 } *cmd = container_of(header, typeof(*cmd), header);
2757 int ret;
2758
2759 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2760 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2761 user_context_converter,
2762 &cmd->body.cid, &ctx_node);
2763 if (ret)
2764 return ret;
2765 } else {
2766 ctx_node = sw_context->dx_ctx_node;
2767 if (!ctx_node) {
2768 DRM_ERROR("DX Context not set.\n");
2769 return -EINVAL;
2770 }
2771 }
2772
2773 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2774 cmd->body.shid, 0);
2775 if (IS_ERR(res)) {
2776 DRM_ERROR("Could not find shader to bind.\n");
2777 return PTR_ERR(res);
2778 }
2779
2780 ret = vmw_resource_val_add(sw_context, res, &res_node);
2781 if (ret) {
2782 DRM_ERROR("Error creating resource validation node.\n");
2783 goto out_unref;
2784 }
2785
2786
2787 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
2788 &cmd->body.mobid,
2789 cmd->body.offsetInBytes);
2790out_unref:
2791 vmw_resource_unreference(&res);
2792
2793 return ret;
2794}
2795
2796static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2797 struct vmw_sw_context *sw_context,
2798 void *buf, uint32_t *size)
2799{
2800 uint32_t size_remaining = *size;
2801 uint32_t cmd_id;
2802
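	/*
	 * 2D (non-3D) SVGA commands are a 32-bit command id followed by a
	 * fixed-size body, so the total size follows from the id alone.
	 */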
2803 cmd_id = ((uint32_t *)buf)[0];
2804 switch (cmd_id) {
2805 case SVGA_CMD_UPDATE:
2806 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2807 break;
2808 case SVGA_CMD_DEFINE_GMRFB:
2809 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2810 break;
2811 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2812 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2813 break;
2814 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2815 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2816 break;
2817 default:
2818 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
2819 return -EINVAL;
2820 }
2821
2822 if (*size > size_remaining) {
2823 DRM_ERROR("Invalid SVGA command (size mismatch):"
2824 " %u.\n", cmd_id);
2825 return -EINVAL;
2826 }
2827
2828 if (unlikely(!sw_context->kernel)) {
2829 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
2830 return -EPERM;
2831 }
2832
2833 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2834 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2835
2836 return 0;
2837}
2838
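/*
 * Per-command dispatch table. The three booleans after each handler are
 * (user_allow, gb_disable, gb_enable): whether user-space may submit the
 * command, whether it is rejected when the device uses guest-backed
 * objects, and whether it requires guest-backed objects (see
 * vmw_cmd_check() below).
 */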
2839static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2840 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2841 false, false, false),
2842 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2843 false, false, false),
2844 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2845 true, false, false),
2846 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2847 true, false, false),
2848 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2849 true, false, false),
2850 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2851 false, false, false),
2852 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2853 false, false, false),
2854 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2855 true, false, false),
2856 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2857 true, false, false),
2858 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2859 true, false, false),
2860 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2861 &vmw_cmd_set_render_target_check, true, false, false),
2862 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2863 true, false, false),
2864 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2865 true, false, false),
2866 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2867 true, false, false),
2868 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2869 true, false, false),
2870 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2871 true, false, false),
2872 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2873 true, false, false),
2874 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2875 true, false, false),
2876 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2877 false, false, false),
2878 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2879 true, false, false),
2880 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2881 true, false, false),
2882 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2883 true, false, false),
2884 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2885 true, false, false),
2886 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2887 true, false, false),
2888 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2889 true, false, false),
2890 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2891 true, false, false),
2892 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2893 true, false, false),
2894 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2895 true, false, false),
2896 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2897 true, false, false),
2898 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2899 &vmw_cmd_blt_surf_screen_check, false, false, false),
2900 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2901 false, false, false),
2902 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2903 false, false, false),
2904 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2905 false, false, false),
2906 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2907 false, false, false),
2908 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2909 false, false, false),
2910 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
2911 false, false, false),
2912 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
2913 false, false, false),
2914 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2915 false, false, false),
2916 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2917 false, false, false),
2918 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2919 false, false, false),
2920 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2921 false, false, false),
2922 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2923 false, false, false),
2924 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2925 false, false, false),
2926 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2927 false, false, true),
2928 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2929 false, false, true),
2930 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2931 false, false, true),
2932 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2933 false, false, true),
2934 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2935 false, false, true),
2936 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2937 false, false, true),
2938 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2939 false, false, true),
2940 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2941 true, false, true),
2942 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2943 false, false, true),
2944 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2945 true, false, true),
2946 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2947 &vmw_cmd_update_gb_surface, true, false, true),
2948 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2949 &vmw_cmd_readback_gb_image, true, false, true),
2950 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2951 &vmw_cmd_readback_gb_surface, true, false, true),
2952 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2953 &vmw_cmd_invalidate_gb_image, true, false, true),
2954 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2955 &vmw_cmd_invalidate_gb_surface, true, false, true),
2956 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2957 false, false, true),
2958 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2959 false, false, true),
2960 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2961 false, false, true),
2962 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2963 false, false, true),
2964 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2965 false, false, true),
2966 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2967 false, false, true),
2968 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2969 true, false, true),
2970 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2971 false, false, true),
2972 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2973 false, false, false),
2974 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2975 true, false, true),
2976 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2977 true, false, true),
2978 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2979 true, false, true),
2980 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2981 true, false, true),
2982 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2983 false, false, true),
2984 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2985 false, false, true),
2986 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2987 false, false, true),
2988 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2989 false, false, true),
2990 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2991 false, false, true),
2992 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2993 false, false, true),
2994 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2995 false, false, true),
2996 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2997 false, false, true),
2998 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2999 false, false, true),
3000 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3001 false, false, true),
3002 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3003 true, false, true),
3004 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3005 false, false, true),
3006 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3007 false, false, true),
3008 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3009 false, false, true),
3010 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3011 false, false, true),
3012
3013 /*
3014 * DX commands
3015 */
3016 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3017 false, false, true),
3018 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3019 false, false, true),
3020 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3021 false, false, true),
3022 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3023 false, false, true),
3024 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3025 false, false, true),
3026 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3027 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3028 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3029 &vmw_cmd_dx_set_shader_res, true, false, true),
3030 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3031 true, false, true),
3032 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3033 true, false, true),
3034 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3035 true, false, true),
3036 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3037 true, false, true),
3038 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3039 true, false, true),
3040 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3041 &vmw_cmd_dx_cid_check, true, false, true),
3042 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3043 true, false, true),
3044 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3045 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3046 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3047 &vmw_cmd_dx_set_index_buffer, true, false, true),
3048 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3049 &vmw_cmd_dx_set_rendertargets, true, false, true),
3050 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3051 true, false, true),
3052 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3053 &vmw_cmd_dx_cid_check, true, false, true),
3054 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3055 &vmw_cmd_dx_cid_check, true, false, true),
3056 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid,
3057 true, false, true),
3058 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid,
3059 true, false, true),
3060 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid,
3061 true, false, true),
3062 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid,
3063 true, false, true),
3064 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid,
3065 true, false, true),
3066 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3067 true, false, true),
3068 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
3069 true, false, true),
3070 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3071 true, false, true),
3072 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3073 true, false, true),
3074 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3075 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3076 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3077 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3078 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_invalid,
3079 true, false, true),
3080 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3081 true, false, true),
3082 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3083 true, false, true),
3084 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3085 &vmw_cmd_dx_check_subresource, true, false, true),
3086 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3087 &vmw_cmd_dx_check_subresource, true, false, true),
3088 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3089 &vmw_cmd_dx_check_subresource, true, false, true),
3090 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3091 &vmw_cmd_dx_view_define, true, false, true),
3092 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3093 &vmw_cmd_dx_view_remove, true, false, true),
3094 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3095 &vmw_cmd_dx_view_define, true, false, true),
3096 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3097 &vmw_cmd_dx_view_remove, true, false, true),
3098 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3099 &vmw_cmd_dx_view_define, true, false, true),
3100 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3101 &vmw_cmd_dx_view_remove, true, false, true),
3102 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3103 &vmw_cmd_dx_so_define, true, false, true),
3104 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3105 &vmw_cmd_dx_cid_check, true, false, true),
3106 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3107 &vmw_cmd_dx_so_define, true, false, true),
3108 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3109 &vmw_cmd_dx_cid_check, true, false, true),
3110 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3111 &vmw_cmd_dx_so_define, true, false, true),
3112 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3113 &vmw_cmd_dx_cid_check, true, false, true),
3114 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3115 &vmw_cmd_dx_so_define, true, false, true),
3116 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3117 &vmw_cmd_dx_cid_check, true, false, true),
3118 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3119 &vmw_cmd_dx_so_define, true, false, true),
3120 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3121 &vmw_cmd_dx_cid_check, true, false, true),
3122 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3123 &vmw_cmd_dx_define_shader, true, false, true),
3124 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3125 &vmw_cmd_dx_destroy_shader, true, false, true),
3126 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3127 &vmw_cmd_dx_bind_shader, true, false, true),
3128 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3129 &vmw_cmd_dx_so_define, true, false, true),
3130 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3131 &vmw_cmd_dx_cid_check, true, false, true),
3132 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3133 true, false, true),
3134 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3135 &vmw_cmd_dx_set_so_targets, true, false, true),
3136 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3137 &vmw_cmd_dx_cid_check, true, false, true),
3138 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3139 &vmw_cmd_dx_cid_check, true, false, true),
3140};
3141
3142static int vmw_cmd_check(struct vmw_private *dev_priv,
3143 struct vmw_sw_context *sw_context,
3144 void *buf, uint32_t *size)
3145{
3146 uint32_t cmd_id;
3147 uint32_t size_remaining = *size;
3148 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3149 int ret;
3150 const struct vmw_cmd_entry *entry;
3151 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3152
3153 cmd_id = ((uint32_t *)buf)[0];
3154 /* Handle any non-3D commands */
3155 if (unlikely(cmd_id < SVGA_CMD_MAX))
3156 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3157
3158
3159 cmd_id = header->id;
3160 *size = header->size + sizeof(SVGA3dCmdHeader);
3161
3162 cmd_id -= SVGA_3D_CMD_BASE;
3163 if (unlikely(*size > size_remaining))
3164 goto out_invalid;
3165
3166 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3167 goto out_invalid;
3168
3169 entry = &vmw_cmd_entries[cmd_id];
3170 if (unlikely(!entry->func))
3171 goto out_invalid;
3172
3173 if (unlikely(!entry->user_allow && !sw_context->kernel))
3174 goto out_privileged;
3175
3176 if (unlikely(entry->gb_disable && gb))
3177 goto out_old;
3178
3179 if (unlikely(entry->gb_enable && !gb))
3180 goto out_new;
3181
3182 ret = entry->func(dev_priv, sw_context, header);
3183 if (unlikely(ret != 0))
3184 goto out_invalid;
3185
3186 return 0;
3187out_invalid:
3188 DRM_ERROR("Invalid SVGA3D command: %d\n",
3189 cmd_id + SVGA_3D_CMD_BASE);
3190 return -EINVAL;
3191out_privileged:
3192 DRM_ERROR("Privileged SVGA3D command: %d\n",
3193 cmd_id + SVGA_3D_CMD_BASE);
3194 return -EPERM;
3195out_old:
3196 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3197 cmd_id + SVGA_3D_CMD_BASE);
3198 return -EINVAL;
3199out_new:
3200 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3201 cmd_id + SVGA_3D_CMD_BASE);
3202 return -EINVAL;
3203}
3204
3205static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3206 struct vmw_sw_context *sw_context,
3207 void *buf,
3208 uint32_t size)
3209{
3210 int32_t cur_size = size;
3211 int ret;
3212
3213 sw_context->buf_start = buf;
3214
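	/*
	 * Walk the stream command by command; vmw_cmd_check() validates
	 * each one and updates size to the command's full length.
	 */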
3215 while (cur_size > 0) {
3216 size = cur_size;
3217 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3218 if (unlikely(ret != 0))
3219 return ret;
3220 buf = (void *)((unsigned long) buf + size);
3221 cur_size -= size;
3222 }
3223
3224 if (unlikely(cur_size != 0)) {
3225 DRM_ERROR("Command verifier out of sync.\n");
3226 return -EINVAL;
3227 }
3228
3229 return 0;
3230}
3231
3232static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3233{
3234 sw_context->cur_reloc = 0;
3235}
3236
3237static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3238{
3239 uint32_t i;
3240 struct vmw_relocation *reloc;
3241 struct ttm_validate_buffer *validate;
3242 struct ttm_buffer_object *bo;
3243
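	/*
	 * All buffers are placed by now; patch each relocation in the
	 * command stream with the final VRAM offset, GMR id or MOB id.
	 */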
3244 for (i = 0; i < sw_context->cur_reloc; ++i) {
3245 reloc = &sw_context->relocs[i];
3246 validate = &sw_context->val_bufs[reloc->index].base;
3247 bo = validate->bo;
3248 switch (bo->mem.mem_type) {
3249 case TTM_PL_VRAM:
3250 reloc->location->offset += bo->offset;
3251 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3252 break;
3253 case VMW_PL_GMR:
3254 reloc->location->gmrId = bo->mem.start;
3255 break;
3256 case VMW_PL_MOB:
3257 *reloc->mob_loc = bo->mem.start;
3258 break;
3259 default:
3260 BUG();
3261 }
3262 }
3263 vmw_free_relocations(sw_context);
3264}
3265
3266/**
3267 * vmw_resource_list_unreference - Free up a resource list and unreference
3268 * all resources referenced by it.
3269 *
 * @sw_context: The software context used for this command submission.
3270 * @list: The resource list.
3271 */
3272static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3273 struct list_head *list)
3274{
3275 struct vmw_resource_val_node *val, *val_next;
3276
3277 /*
3278 * Drop references to resources held during command submission.
3279 */
3280
3281 list_for_each_entry_safe(val, val_next, list, head) {
3282 list_del_init(&val->head);
3283 vmw_resource_unreference(&val->res);
3284
3285 if (val->staged_bindings) {
3286 if (val->staged_bindings != sw_context->staged_bindings)
3287 vmw_binding_state_free(val->staged_bindings);
3288 else
3289 sw_context->staged_bindings_inuse = false;
3290 val->staged_bindings = NULL;
3291 }
3292
3293 kfree(val);
3294 }
3295}
3296
3297static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3298{
3299 struct vmw_validate_buffer *entry, *next;
3300 struct vmw_resource_val_node *val;
3301
3302 /*
3303 * Drop references to DMA buffers held during command submission.
3304 */
3305 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3306 base.head) {
3307 list_del(&entry->base.head);
3308 ttm_bo_unref(&entry->base.bo);
3309 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3310 sw_context->cur_val_buf--;
3311 }
3312 BUG_ON(sw_context->cur_val_buf != 0);
3313
3314 list_for_each_entry(val, &sw_context->resource_list, head)
3315 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3316}
3317
3318int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3319 struct ttm_buffer_object *bo,
3320 bool interruptible,
3321 bool validate_as_mob)
3322{
3323 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3324 base);
3325 int ret;
3326
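	/* Pinned buffers already have a fixed placement; nothing to do. */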
3327 if (vbo->pin_count > 0)
3328 return 0;
3329
3330 if (validate_as_mob)
3331 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3332 false);
3333
3334 /**
3335 * Put BO in VRAM if there is space, otherwise as a GMR.
3336 * If there is no space in VRAM and GMR ids are all used up,
3337 * start evicting GMRs to make room. If the DMA buffer can't be
3338 * used as a GMR, this will return -ENOMEM.
3339 */
3340
3341 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3342 false);
3343 if (likely(ret == 0 || ret == -ERESTARTSYS))
3344 return ret;
3345
3346 /**
3347 * If that failed, try VRAM again, this time evicting
3348 * previous contents.
3349 */
3350
3351 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3352 return ret;
3353}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
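
/*
 * vmw_resize_cmd_bounce - Grow the bounce buffer used for copying in
 * user-space command batches. The buffer grows by roughly 1.5x per
 * iteration, page-aligned, until it covers @size. As an illustration
 * (assuming a 32 KiB initial size and 4 KiB pages; both values are
 * assumptions of this example, not guarantees):
 * 32 KiB -> 48 KiB -> 72 KiB -> ...
 */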
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the calling file; may be NULL if @p_handle is NULL.
 * @dev_priv: Pointer to a device private structure.
 * @p_fence: Out parameter receiving a pointer to the fence object.
 * @p_handle: If non-NULL, out parameter receiving a user-space handle for
 * the fence. Requires @file_priv to be non-NULL as well.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
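
/*
 * A minimal sketch of the calling pattern the above enables (compare the
 * real users in vmw_execbuf_process() and __vmw_execbuf_release_pinned_bo()
 * below); the returned fence may legitimately be NULL after a submission
 * error and can still be handed to the TTM fencing helper:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
 *	if (fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */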

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched; if user-space has preloaded it with -EFAULT, the error will
 * then be detected.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
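
/*
 * The user-space half of the convention described above, as a minimal
 * sketch (the ioctl invocation is elided and the surrounding code is
 * hypothetical):
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;
 *	...issue the execbuf ioctl with &rep as the fence_rep address...
 *	if (rep.error != 0)
 *		...fence creation failed, or the copy back to user-space
 *		   failed and the preloaded -EFAULT survived...
 */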

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to a command batch, or NULL if the
 * batch is to be copied from user-space.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function returns the value of
 * @kernel_commands it was called with. That value may be NULL. In that
 * case, the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error
 * value. If the function is interrupted by a signal while sleeping, it
 * will return -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
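
/**
 * vmw_execbuf_tie_context - Look up and reference the DX context, if any,
 * that a command batch is to be submitted under.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if
 * the batch is not tied to a DX context.
 */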
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
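
/**
 * vmw_execbuf_process - Validate, patch and submit a command batch.
 *
 * @file_priv: Pointer to the calling file, or NULL for in-kernel
 * submissions that don't request a user-space fence handle.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL if
 * @kernel_commands is given.
 * @kernel_commands: Kernel pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Lag threshold to throttle on, or 0 to skip throttling.
 * @dx_context_handle: Handle of a DX context to submit under, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: Optional user-space address to copy fence information to.
 * @out_fence: If non-NULL, takes over the reference to the fence created
 * for the batch.
 */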
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);

	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
				    false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
				    true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}
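
/*
 * A minimal sketch of a kernel-internal caller of vmw_execbuf_process()
 * that keeps the fence via @out_fence (hypothetical; the in-file caller,
 * vmw_execbuf_ioctl() below, passes a NULL @out_fence instead, and the
 * names cmds/size are illustrative):
 *
 *	struct vmw_fence_obj *fence = NULL;
 *	int ret;
 *
 *	ret = vmw_execbuf_process(NULL, dev_priv, NULL, cmds, size, 0,
 *				  SVGA3D_INVALID_ID, NULL, &fence);
 *	if (ret == 0 && fence != NULL) {
 *		(void) vmw_fence_obj_wait(fence, false, false,
 *					  VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */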

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: we take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}
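
	/*
	 * At this point arg is fully populated: the fields below
	 * copy_offset[0] always come from user-space, the tail between
	 * copy_offset[0] and copy_offset[arg.version - 1] is copied in
	 * only for version >= 2, and version-1 callers get the default
	 * context_handle set in the switch above.
	 */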

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}