drm/vmwgfx: Add command parser support for a couple of DX commands
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c (linux-2.6-block.git)
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4-byte entries, into the command buffer
 * where the id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: @switching_backup is true on the first buffer
 * reference, so resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

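/*
 * Illustrative use of VMW_CMD_DEF (a sketch; the authoritative command
 * table appears further down in this file):
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check,
 *			    true, false, false),
 *		...
 *	};
 *
 * Indexing by (_cmd - SVGA_3D_CMD_BASE) makes the per-command lookup in
 * the verifier a single array access.
 */
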
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context,
					struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list. */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

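/*
 * A minimal sketch of the fixup above, for a relocation recorded at parse
 * time at 32-bit word offset rel->offset:
 *
 *	cb[rel->offset] = rel->res->id;      resource still valid; patch in
 *					     its device id
 *	cb[rel->offset] = SVGA_3D_CMD_NOP;   resource gone; evidently the
 *					     recorded word is the command id
 *					     itself, so the whole command
 *					     becomes a no-op
 *
 * Since @offset counts 32-bit words rather than bytes, callers pass the
 * pointer difference "id_loc - sw_context->buf_start" between uint32_t
 * pointers.
 */
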
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

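/*
 * Note the GNU "?:" (Elvis) extension above: when capable(CAP_SYS_ADMIN)
 * is non-zero the expression evaluates to that non-zero value, so a
 * privileged caller gets capable()'s result back rather than 0, while
 * everyone else gets -EINVAL.
 */
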
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

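/*
 * Usage note: callers that need to patch a buffer reference later pass a
 * relocation slot as @p_val_node, as in vmw_translate_mob_ptr() below:
 *
 *	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true,
 *				      &reloc->index);
 *
 * so the relocation can refer back to the validation entry by index once
 * the buffer has been placed.
 */
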
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

818
c0951b79
TH
819/**
820 * vmw_cmd_cid_check - Check a command header for valid context information.
821 *
822 * @dev_priv: Pointer to a device private structure.
823 * @sw_context: Pointer to the software context.
824 * @header: A command header with an embedded user-space context handle.
825 *
826 * Convenience function: Call vmw_cmd_res_check with the user-space context
827 * handle embedded in @header.
828 */
829static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
830 struct vmw_sw_context *sw_context,
831 SVGA3dCmdHeader *header)
832{
833 struct vmw_cid_cmd {
834 SVGA3dCmdHeader header;
8e67bbbc 835 uint32_t cid;
c0951b79
TH
836 } *cmd;
837
838 cmd = container_of(header, struct vmw_cid_cmd, header);
839 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
840 user_context_converter, &cmd->cid, NULL);
841}
fb1d9738
JB
842
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

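/*
 * vmw_cmd_buffer_copy_check() and vmw_cmd_pred_copy_check() above handle
 * SVGA3dCmdDXBufferCopy and SVGA3dCmdDXPredCopyRegion; these appear to be
 * the "couple of DX commands" referred to in the commit subject. Both
 * reduce to a pair of surface-id checks against the validation list.
 */
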
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

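/*
 * A sketch of how prepare/commit pair up over one submission, assuming
 * the usual execbuf flow:
 *
 *	vmw_query_bo_switch_prepare()	during command parsing; may set
 *					needs_post_query_barrier and stage
 *					cur_query_bo
 *	...commands are committed to the device...
 *	vmw_query_bo_switch_commit()	after submission; emits the dummy
 *					query barrier if needed and repins
 *	...both buffers are then fenced by a sequence emitted after commit,
 *	as required by the kerneldoc above...
 */
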
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

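/*
 * Note that reloc->location recorded above is only filled in with a real
 * GMR id and offset after the buffer has been validated and placed,
 * during vmw_apply_relocations() (outside this excerpt). This is why the
 * kerneldoc above says the translation "does not take place immediately".
 */
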
/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

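/*
 * The rewrite above relies on the legacy and guest-backed commands having
 * identical sizes (hence the BUG_ON): when MOBs are available, the legacy
 * command is overwritten in place in the command stream with its GB
 * equivalent, and validation is then delegated to the GB handler. The
 * END_QUERY and WAIT_QUERY handlers below follow the same pattern.
 */
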
/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

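/*
 * A worked example of the clamping above, assuming a 2-page buffer
 * (PAGE_SIZE = 4096) and cmd->dma.guest.ptr.offset = 4096:
 *
 *	bo_size = 2 * 4096 = 8192;	offset 4096 <= 8192, accepted
 *	bo_size -= 4096;		4096 bytes remain past the offset
 *	suffix->maximumOffset > 4096	=> clamped to 4096
 *
 * so a malicious maximumOffset cannot make the device DMA past the end
 * of the guest buffer object.
 */
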
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

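/*
 * Layout of the variable-size command validated above, as implied by the
 * pointer arithmetic (header->size is in bytes):
 *
 *	SVGA3dCmdHeader		 header
 *	SVGA3dCmdDrawPrimitives	 body
 *	SVGA3dVertexDecl	 decl[body.numVertexDecls]
 *	SVGA3dPrimitiveRange	 range[body.numRanges]
 *
 * Both counts are checked against what actually fits in header->size
 * before the arrays are walked, so a corrupt count cannot push the
 * parser past the end of the command.
 */
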
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}


/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

a97e2192
TH
1725/**
1726 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1727 *
1728 * @dev_priv: Pointer to a device private struct.
1729 * @sw_context: The software context being used for this batch.
1730 * @res_type: The resource type.
1731 * @converter: Information about user-space binding for this resource type.
1732 * @res_id: Pointer to the user-space resource handle in the command stream.
1733 * @buf_id: Pointer to the user-space backup buffer handle in the command
1734 * stream.
1735 * @backup_offset: Offset of backup into MOB.
1736 *
1737 * This function prepares for registering a switch of backup buffers
1738 * in the resource metadata just prior to unreserving. It's basically a wrapper
1739 * around vmw_cmd_res_switch_backup with a different interface.
1740 */
1741static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1742 struct vmw_sw_context *sw_context,
1743 enum vmw_res_type res_type,
1744 const struct vmw_user_resource_conv
1745 *converter,
1746 uint32_t *res_id,
1747 uint32_t *buf_id,
1748 unsigned long backup_offset)
1749{
a97e2192 1750 struct vmw_resource_val_node *val_node;
d80efd5c 1751 int ret;
1752
1753 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1754 converter, res_id, &val_node);
d80efd5c 1755 if (ret)
1756 return ret;
1757
1758 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1759 buf_id, backup_offset);
1760}
1761
1762/**
1763 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1764 * command
1765 *
1766 * @dev_priv: Pointer to a device private struct.
1767 * @sw_context: The software context being used for this batch.
1768 * @header: Pointer to the command header in the command stream.
1769 */
1770static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1771 struct vmw_sw_context *sw_context,
1772 SVGA3dCmdHeader *header)
1773{
1774 struct vmw_bind_gb_surface_cmd {
1775 SVGA3dCmdHeader header;
1776 SVGA3dCmdBindGBSurface body;
1777 } *cmd;
1778
1779 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1780
1781 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1782 user_surface_converter,
1783 &cmd->body.sid, &cmd->body.mobid,
1784 0);
1785}
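/*
 * Every fixed-size checker in this file unwraps its command the same
 * way: a local struct embeds the SVGA3dCmdHeader directly followed by
 * the body, and container_of() recovers the wrapper from the header
 * pointer the dispatcher hands in. A sketch of the idiom (the
 * vmw_example_cmd name is hypothetical):
 */
struct vmw_example_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBindGBSurface body;
};

static inline struct vmw_example_cmd *
vmw_example_unwrap(SVGA3dCmdHeader *header)
{
	/* Subtracts offsetof(struct vmw_example_cmd, header), zero here. */
	return container_of(header, struct vmw_example_cmd, header);
}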
1786
1787/**
1788 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1789 * command
1790 *
1791 * @dev_priv: Pointer to a device private struct.
1792 * @sw_context: The software context being used for this batch.
1793 * @header: Pointer to the command header in the command stream.
1794 */
1795static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1796 struct vmw_sw_context *sw_context,
1797 SVGA3dCmdHeader *header)
1798{
1799 struct vmw_gb_surface_cmd {
1800 SVGA3dCmdHeader header;
1801 SVGA3dCmdUpdateGBImage body;
1802 } *cmd;
1803
1804 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1805
1806 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1807 user_surface_converter,
1808 &cmd->body.image.sid, NULL);
1809}
1810
1811/**
1812 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1813 * command
1814 *
1815 * @dev_priv: Pointer to a device private struct.
1816 * @sw_context: The software context being used for this batch.
1817 * @header: Pointer to the command header in the command stream.
1818 */
1819static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1820 struct vmw_sw_context *sw_context,
1821 SVGA3dCmdHeader *header)
1822{
1823 struct vmw_gb_surface_cmd {
1824 SVGA3dCmdHeader header;
1825 SVGA3dCmdUpdateGBSurface body;
1826 } *cmd;
1827
1828 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1829
1830 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1831 user_surface_converter,
1832 &cmd->body.sid, NULL);
1833}
1834
1835/**
1836 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1837 * command
1838 *
1839 * @dev_priv: Pointer to a device private struct.
1840 * @sw_context: The software context being used for this batch.
1841 * @header: Pointer to the command header in the command stream.
1842 */
1843static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1844 struct vmw_sw_context *sw_context,
1845 SVGA3dCmdHeader *header)
1846{
1847 struct vmw_gb_surface_cmd {
1848 SVGA3dCmdHeader header;
1849 SVGA3dCmdReadbackGBImage body;
1850 } *cmd;
1851
1852 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1853
1854 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1855 user_surface_converter,
1856 &cmd->body.image.sid, NULL);
1857}
1858
1859/**
1860 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1861 * command
1862 *
1863 * @dev_priv: Pointer to a device private struct.
1864 * @sw_context: The software context being used for this batch.
1865 * @header: Pointer to the command header in the command stream.
1866 */
1867static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1868 struct vmw_sw_context *sw_context,
1869 SVGA3dCmdHeader *header)
1870{
1871 struct vmw_gb_surface_cmd {
1872 SVGA3dCmdHeader header;
1873 SVGA3dCmdReadbackGBSurface body;
1874 } *cmd;
1875
1876 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1877
1878 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1879 user_surface_converter,
1880 &cmd->body.sid, NULL);
1881}
1882
1883/**
1884 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1885 * command
1886 *
1887 * @dev_priv: Pointer to a device private struct.
1888 * @sw_context: The software context being used for this batch.
1889 * @header: Pointer to the command header in the command stream.
1890 */
1891static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1892 struct vmw_sw_context *sw_context,
1893 SVGA3dCmdHeader *header)
1894{
1895 struct vmw_gb_surface_cmd {
1896 SVGA3dCmdHeader header;
1897 SVGA3dCmdInvalidateGBImage body;
1898 } *cmd;
1899
1900 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1901
1902 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1903 user_surface_converter,
1904 &cmd->body.image.sid, NULL);
1905}
1906
1907/**
1908 * vmw_cmd_invalidate_gb_surface - Validate an
1909 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1910 *
1911 * @dev_priv: Pointer to a device private struct.
1912 * @sw_context: The software context being used for this batch.
1913 * @header: Pointer to the command header in the command stream.
1914 */
1915static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1916 struct vmw_sw_context *sw_context,
1917 SVGA3dCmdHeader *header)
1918{
1919 struct vmw_gb_surface_cmd {
1920 SVGA3dCmdHeader header;
1921 SVGA3dCmdInvalidateGBSurface body;
1922 } *cmd;
1923
1924 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1925
1926 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1927 user_surface_converter,
1928 &cmd->body.sid, NULL);
1929}
1930
1931
1932/**
1933 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1934 * command
1935 *
1936 * @dev_priv: Pointer to a device private struct.
1937 * @sw_context: The software context being used for this batch.
1938 * @header: Pointer to the command header in the command stream.
1939 */
1940static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1941 struct vmw_sw_context *sw_context,
1942 SVGA3dCmdHeader *header)
1943{
1944 struct vmw_shader_define_cmd {
1945 SVGA3dCmdHeader header;
1946 SVGA3dCmdDefineShader body;
1947 } *cmd;
1948 int ret;
1949 size_t size;
18e4a466 1950 struct vmw_resource_val_node *val;
1951
1952 cmd = container_of(header, struct vmw_shader_define_cmd,
1953 header);
1954
1955 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1956 user_context_converter, &cmd->body.cid,
18e4a466 1957 &val);
1958 if (unlikely(ret != 0))
1959 return ret;
1960
1961 if (unlikely(!dev_priv->has_mob))
1962 return 0;
1963
1964 size = cmd->header.size - sizeof(cmd->body);
1965 ret = vmw_compat_shader_add(dev_priv,
1966 vmw_context_res_man(val->res),
1967 cmd->body.shid, cmd + 1,
1968 cmd->body.type, size,
18e4a466 1969 &sw_context->staged_cmd_res);
1970 if (unlikely(ret != 0))
1971 return ret;
1972
1973 return vmw_resource_relocation_add(&sw_context->res_relocations,
1974 NULL, &cmd->header.id -
1975 sw_context->buf_start);
1978}
1979
1980/**
1981 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1982 * command
1983 *
1984 * @dev_priv: Pointer to a device private struct.
1985 * @sw_context: The software context being used for this batch.
1986 * @header: Pointer to the command header in the command stream.
1987 */
1988static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1989 struct vmw_sw_context *sw_context,
1990 SVGA3dCmdHeader *header)
1991{
1992 struct vmw_shader_destroy_cmd {
1993 SVGA3dCmdHeader header;
1994 SVGA3dCmdDestroyShader body;
1995 } *cmd;
1996 int ret;
18e4a466 1997 struct vmw_resource_val_node *val;
1998
1999 cmd = container_of(header, struct vmw_shader_destroy_cmd,
2000 header);
2001
2002 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2003 user_context_converter, &cmd->body.cid,
18e4a466 2004 &val);
2005 if (unlikely(ret != 0))
2006 return ret;
2007
2008 if (unlikely(!dev_priv->has_mob))
2009 return 0;
2010
2011 ret = vmw_shader_remove(vmw_context_res_man(val->res),
2012 cmd->body.shid,
2013 cmd->body.type,
2014 &sw_context->staged_cmd_res);
2015 if (unlikely(ret != 0))
2016 return ret;
2017
2018 return vmw_resource_relocation_add(&sw_context->res_relocations,
2019 NULL, &cmd->header.id -
2020 sw_context->buf_start);
2023}
2024
2025/**
2026 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2027 * command
2028 *
2029 * @dev_priv: Pointer to a device private struct.
2030 * @sw_context: The software context being used for this batch.
2031 * @header: Pointer to the command header in the command stream.
2032 */
2033static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2034 struct vmw_sw_context *sw_context,
2035 SVGA3dCmdHeader *header)
2036{
2037 struct vmw_set_shader_cmd {
2038 SVGA3dCmdHeader header;
2039 SVGA3dCmdSetShader body;
2040 } *cmd;
18e4a466 2041 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
d80efd5c 2042 struct vmw_ctx_bindinfo_shader binding;
18e4a466 2043 struct vmw_resource *res = NULL;
2044 int ret;
2045
2046 cmd = container_of(header, struct vmw_set_shader_cmd,
2047 header);
2048
2049 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2050 DRM_ERROR("Illegal shader type %u.\n",
2051 (unsigned) cmd->body.type);
2052 return -EINVAL;
2053 }
2054
2055 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2056 user_context_converter, &cmd->body.cid,
2057 &ctx_node);
2058 if (unlikely(ret != 0))
2059 return ret;
2060
2061 if (!dev_priv->has_mob)
2062 return 0;
2063
2064 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2065 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2066 cmd->body.shid,
2067 cmd->body.type);
2068
2069 if (!IS_ERR(res)) {
2070 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2071 &cmd->body.shid, res,
2072 &res_node);
2073 vmw_resource_unreference(&res);
2074 if (unlikely(ret != 0))
2075 return ret;
2076 }
2077 }
2078
2079 if (!res_node) {
2080 ret = vmw_cmd_res_check(dev_priv, sw_context,
2081 vmw_res_shader,
2082 user_shader_converter,
2083 &cmd->body.shid, &res_node);
2084 if (unlikely(ret != 0))
2085 return ret;
b5c3b1a6 2086 }
c74c162f 2087
2088 binding.bi.ctx = ctx_node->res;
2089 binding.bi.res = res_node ? res_node->res : NULL;
2090 binding.bi.bt = vmw_ctx_binding_shader;
2091 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2092 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2093 binding.shader_slot, 0);
2094 return 0;
2095}
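/*
 * Lookup order used by vmw_cmd_set_shader() above, as a hedged sketch:
 * on guest-backed devices a shader id may name a per-context "compat"
 * shader managed through the context's command buffer resource manager;
 * only when no such shader exists does the checker fall back to
 * treating the id as an ordinary user-space shader handle.
 */
static bool vmw_compat_shader_exists_sketch(struct vmw_resource_val_node *ctx,
					    uint32_t shid,
					    SVGA3dShaderType type)
{
	struct vmw_resource *res =
		vmw_shader_lookup(vmw_context_res_man(ctx->res), shid, type);

	if (IS_ERR(res))
		return false;	/* fall back to the user handle path */

	vmw_resource_unreference(&res);
	return true;
}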
2096
2097/**
2098 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2099 * command
2100 *
2101 * @dev_priv: Pointer to a device private struct.
2102 * @sw_context: The software context being used for this batch.
2103 * @header: Pointer to the command header in the command stream.
2104 */
2105static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2106 struct vmw_sw_context *sw_context,
2107 SVGA3dCmdHeader *header)
2108{
2109 struct vmw_set_shader_const_cmd {
2110 SVGA3dCmdHeader header;
2111 SVGA3dCmdSetShaderConst body;
2112 } *cmd;
2113 int ret;
2114
2115 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2116 header);
2117
2118 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2119 user_context_converter, &cmd->body.cid,
2120 NULL);
2121 if (unlikely(ret != 0))
2122 return ret;
2123
2124 if (dev_priv->has_mob)
2125 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2126
2127 return 0;
2128}
2129
2130/**
2131 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2132 * command
2133 *
2134 * @dev_priv: Pointer to a device private struct.
2135 * @sw_context: The software context being used for this batch.
2136 * @header: Pointer to the command header in the command stream.
2137 */
2138static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2139 struct vmw_sw_context *sw_context,
2140 SVGA3dCmdHeader *header)
2141{
2142 struct vmw_bind_gb_shader_cmd {
2143 SVGA3dCmdHeader header;
2144 SVGA3dCmdBindGBShader body;
2145 } *cmd;
2146
2147 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2148 header);
2149
2150 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2151 user_shader_converter,
2152 &cmd->body.shid, &cmd->body.mobid,
2153 cmd->body.offsetInBytes);
2154}
2155
2156/**
2157 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2158 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2159 *
2160 * @dev_priv: Pointer to a device private struct.
2161 * @sw_context: The software context being used for this batch.
2162 * @header: Pointer to the command header in the command stream.
2163 */
2164static int
2165vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2166 struct vmw_sw_context *sw_context,
2167 SVGA3dCmdHeader *header)
4084fb89 2168{
2169 struct {
2170 SVGA3dCmdHeader header;
2171 SVGA3dCmdDXSetSingleConstantBuffer body;
2172 } *cmd;
2173 struct vmw_resource_val_node *res_node = NULL;
2174 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2175 struct vmw_ctx_bindinfo_cb binding;
2176 int ret;
4084fb89 2177
2178 if (unlikely(ctx_node == NULL)) {
2179 DRM_ERROR("DX Context not set.\n");
2180 return -EINVAL;
2181 }
2182
2183 cmd = container_of(header, typeof(*cmd), header);
2184 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2185 user_surface_converter,
2186 &cmd->body.sid, &res_node);
2187 if (unlikely(ret != 0))
2188 return ret;
4084fb89 2189
2190 binding.bi.ctx = ctx_node->res;
2191 binding.bi.res = res_node ? res_node->res : NULL;
2192 binding.bi.bt = vmw_ctx_binding_cb;
2193 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2194 binding.offset = cmd->body.offsetInBytes;
2195 binding.size = cmd->body.sizeInBytes;
2196 binding.slot = cmd->body.slot;
2197
2198 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2199 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2200 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2201 (unsigned) cmd->body.type,
2202 (unsigned) binding.slot);
2203 return -EINVAL;
2204 }
2205
2206 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2207 binding.shader_slot, binding.slot);
2208
2209 return 0;
2210}
fb1d9738 2211
2212/**
2213 * vmw_cmd_dx_set_shader_res - Validate an
2214 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2215 *
2216 * @dev_priv: Pointer to a device private struct.
2217 * @sw_context: The software context being used for this batch.
2218 * @header: Pointer to the command header in the command stream.
2219 */
2220static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2221 struct vmw_sw_context *sw_context,
2222 SVGA3dCmdHeader *header)
2223{
2224 struct {
2225 SVGA3dCmdHeader header;
2226 SVGA3dCmdDXSetShaderResources body;
2227 } *cmd = container_of(header, typeof(*cmd), header);
2228 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2229 sizeof(SVGA3dShaderResourceViewId);
2230
2231 if ((u64) cmd->body.startView + (u64) num_sr_view >
2232 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2233 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2234 DRM_ERROR("Invalid shader binding.\n");
2235 return -EINVAL;
2236 }
2237
2238 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2239 vmw_ctx_binding_sr,
2240 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2241 (void *) &cmd[1], num_sr_view,
2242 cmd->body.startView);
2243}
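/*
 * Variable-sized DX commands put an id array right after the fixed
 * body, so the element count follows from the header size alone; range
 * checks are then done in u64 so a huge startView cannot wrap. A small
 * helper-style sketch of the arithmetic:
 */
static inline u32 vmw_cmd_elem_count_sketch(const SVGA3dCmdHeader *header,
					    size_t body_size,
					    size_t elem_size)
{
	/* header->size = body_size + n * elem_size  =>  solve for n. */
	return (header->size - body_size) / elem_size;
}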
2244
2245/**
2246 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2247 * command
2248 *
2249 * @dev_priv: Pointer to a device private struct.
2250 * @sw_context: The software context being used for this batch.
2251 * @header: Pointer to the command header in the command stream.
2252 */
2253static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2254 struct vmw_sw_context *sw_context,
2255 SVGA3dCmdHeader *header)
2256{
2257 struct {
2258 SVGA3dCmdHeader header;
2259 SVGA3dCmdDXSetShader body;
2260 } *cmd;
2261 struct vmw_resource *res = NULL;
2262 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2263 struct vmw_ctx_bindinfo_shader binding;
2264 int ret = 0;
2265
2266 if (unlikely(ctx_node == NULL)) {
2267 DRM_ERROR("DX Context not set.\n");
2268 return -EINVAL;
2269 }
2270
2271 cmd = container_of(header, typeof(*cmd), header);
2272
2273 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2274 DRM_ERROR("Illegal shader type %u.\n",
2275 (unsigned) cmd->body.type);
2276 return -EINVAL;
2277 }
2278
2279 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2280 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2281 if (IS_ERR(res)) {
2282 DRM_ERROR("Could not find shader for binding.\n");
2283 return PTR_ERR(res);
2284 }
2285
2286 ret = vmw_resource_val_add(sw_context, res, NULL);
2287 if (ret)
2288 goto out_unref;
2289 }
2290
2291 binding.bi.ctx = ctx_node->res;
2292 binding.bi.res = res;
2293 binding.bi.bt = vmw_ctx_binding_dx_shader;
2294 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2295
2296 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2297 binding.shader_slot, 0);
2298out_unref:
2299 if (res)
2300 vmw_resource_unreference(&res);
2301
2302 return ret;
2303}
2304
2305/**
2306 * vmw_cmd_dx_set_vertex_buffers - Validates an
2307 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2308 *
2309 * @dev_priv: Pointer to a device private struct.
2310 * @sw_context: The software context being used for this batch.
2311 * @header: Pointer to the command header in the command stream.
2312 */
2313static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2314 struct vmw_sw_context *sw_context,
2315 SVGA3dCmdHeader *header)
2316{
2317 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2318 struct vmw_ctx_bindinfo_vb binding;
2319 struct vmw_resource_val_node *res_node;
2320 struct {
2321 SVGA3dCmdHeader header;
2322 SVGA3dCmdDXSetVertexBuffers body;
2323 SVGA3dVertexBuffer buf[];
2324 } *cmd;
2325 int i, ret, num;
2326
2327 if (unlikely(ctx_node == NULL)) {
2328 DRM_ERROR("DX Context not set.\n");
2329 return -EINVAL;
2330 }
2331
2332 cmd = container_of(header, typeof(*cmd), header);
2333 num = (cmd->header.size - sizeof(cmd->body)) /
2334 sizeof(SVGA3dVertexBuffer);
2335 if ((u64)num + (u64)cmd->body.startBuffer >
2336 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2337 DRM_ERROR("Invalid number of vertex buffers.\n");
2338 return -EINVAL;
2339 }
2340
2341 for (i = 0; i < num; i++) {
2342 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2343 user_surface_converter,
2344 &cmd->buf[i].sid, &res_node);
2345 if (unlikely(ret != 0))
2346 return ret;
2347
2348 binding.bi.ctx = ctx_node->res;
2349 binding.bi.bt = vmw_ctx_binding_vb;
2350 binding.bi.res = ((res_node) ? res_node->res : NULL);
2351 binding.offset = cmd->buf[i].offset;
2352 binding.stride = cmd->buf[i].stride;
2353 binding.slot = i + cmd->body.startBuffer;
2354
2355 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2356 0, binding.slot);
2357 }
2358
2359 return 0;
2360}
2361
2362/**
2363 * vmw_cmd_dx_set_index_buffer - Validate an
2364 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2365 *
2366 * @dev_priv: Pointer to a device private struct.
2367 * @sw_context: The software context being used for this batch.
2368 * @header: Pointer to the command header in the command stream.
2369 */
2370static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2371 struct vmw_sw_context *sw_context,
2372 SVGA3dCmdHeader *header)
2373{
2374 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2375 struct vmw_ctx_bindinfo_ib binding;
2376 struct vmw_resource_val_node *res_node;
2377 struct {
2378 SVGA3dCmdHeader header;
2379 SVGA3dCmdDXSetIndexBuffer body;
2380 } *cmd;
2381 int ret;
2382
2383 if (unlikely(ctx_node == NULL)) {
2384 DRM_ERROR("DX Context not set.\n");
2385 return -EINVAL;
2386 }
2387
2388 cmd = container_of(header, typeof(*cmd), header);
2389 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2390 user_surface_converter,
2391 &cmd->body.sid, &res_node);
2392 if (unlikely(ret != 0))
2393 return ret;
2394
2395 binding.bi.ctx = ctx_node->res;
2396 binding.bi.res = ((res_node) ? res_node->res : NULL);
2397 binding.bi.bt = vmw_ctx_binding_ib;
2398 binding.offset = cmd->body.offset;
2399 binding.format = cmd->body.format;
2400
2401 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2402
2403 return 0;
2404}
2405
2406/**
2407 * vmw_cmd_dx_set_rendertargets - Validate an
2408 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2409 *
2410 * @dev_priv: Pointer to a device private struct.
2411 * @sw_context: The software context being used for this batch.
2412 * @header: Pointer to the command header in the command stream.
2413 */
2414static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2415 struct vmw_sw_context *sw_context,
2416 SVGA3dCmdHeader *header)
2417{
2418 struct {
2419 SVGA3dCmdHeader header;
2420 SVGA3dCmdDXSetRenderTargets body;
2421 } *cmd = container_of(header, typeof(*cmd), header);
2422 int ret;
2423 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2424 sizeof(SVGA3dRenderTargetViewId);
2425
2426 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2427 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2428 return -EINVAL;
2429 }
2430
2431 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2432 vmw_ctx_binding_ds, 0,
2433 &cmd->body.depthStencilViewId, 1, 0);
2434 if (ret)
2435 return ret;
2436
2437 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2438 vmw_ctx_binding_dx_rt, 0,
2439 (void *)&cmd[1], num_rt_view, 0);
2440}
2441
2442/**
2443 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2444 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2445 *
2446 * @dev_priv: Pointer to a device private struct.
2447 * @sw_context: The software context being used for this batch.
2448 * @header: Pointer to the command header in the command stream.
2449 */
2450static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2451 struct vmw_sw_context *sw_context,
2452 SVGA3dCmdHeader *header)
2453{
2454 struct {
2455 SVGA3dCmdHeader header;
2456 SVGA3dCmdDXClearRenderTargetView body;
2457 } *cmd = container_of(header, typeof(*cmd), header);
2458
2459 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2460 cmd->body.renderTargetViewId);
2461}
2462
2463/**
2464 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2465 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2466 *
2467 * @dev_priv: Pointer to a device private struct.
2468 * @sw_context: The software context being used for this batch.
2469 * @header: Pointer to the command header in the command stream.
2470 */
2471static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2472 struct vmw_sw_context *sw_context,
2473 SVGA3dCmdHeader *header)
2474{
2475 struct {
2476 SVGA3dCmdHeader header;
2477 SVGA3dCmdDXClearDepthStencilView body;
2478 } *cmd = container_of(header, typeof(*cmd), header);
2479
2480 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2481 cmd->body.depthStencilViewId);
2482}
2483
2484static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2485 struct vmw_sw_context *sw_context,
2486 SVGA3dCmdHeader *header)
2487{
2488 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2489 struct vmw_resource_val_node *srf_node;
2490 struct vmw_resource *res;
2491 enum vmw_view_type view_type;
2492 int ret;
2493 /*
2494 * This is based on the fact that all affected define commands have
2495 * the same initial command body layout.
2496 */
2497 struct {
2498 SVGA3dCmdHeader header;
2499 uint32 defined_id;
2500 uint32 sid;
2501 } *cmd;
2502
2503 if (unlikely(ctx_node == NULL)) {
2504 DRM_ERROR("DX Context not set.\n");
2505 return -EINVAL;
2506 }
2507
2508 view_type = vmw_view_cmd_to_type(header->id);
2509 cmd = container_of(header, typeof(*cmd), header);
2510 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2511 user_surface_converter,
2512 &cmd->sid, &srf_node);
2513 if (unlikely(ret != 0))
2514 return ret;
2515
2516 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2517 ret = vmw_cotable_notify(res, cmd->defined_id);
2518 vmw_resource_unreference(&res);
2519 if (unlikely(ret != 0))
2520 return ret;
2521
2522 return vmw_view_add(sw_context->man,
2523 ctx_node->res,
2524 srf_node->res,
2525 view_type,
2526 cmd->defined_id,
2527 header,
2528 header->size + sizeof(*header),
2529 &sw_context->staged_cmd_res);
2530}
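/*
 * The define-view path above is a three-step pattern that repeats for
 * all DX view types: validate the backing surface, notify the
 * context's cotable so it can grow to hold the new id, then stage the
 * view via vmw_view_add() so it only becomes visible if the whole
 * batch validates. Roughly:
 *
 *	vmw_cmd_res_check(... &cmd->sid ...);	  surface must exist
 *	vmw_cotable_notify(cotable, defined_id);  room for the id
 *	vmw_view_add(...);			  staged, not yet live
 */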
2531
2532/**
2533 * vmw_cmd_dx_set_so_targets - Validate an
2534 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2535 *
2536 * @dev_priv: Pointer to a device private struct.
2537 * @sw_context: The software context being used for this batch.
2538 * @header: Pointer to the command header in the command stream.
2539 */
2540static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2541 struct vmw_sw_context *sw_context,
2542 SVGA3dCmdHeader *header)
2543{
2544 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2545 struct vmw_ctx_bindinfo_so binding;
2546 struct vmw_resource_val_node *res_node;
2547 struct {
2548 SVGA3dCmdHeader header;
2549 SVGA3dCmdDXSetSOTargets body;
2550 SVGA3dSoTarget targets[];
2551 } *cmd;
2552 int i, ret, num;
2553
2554 if (unlikely(ctx_node == NULL)) {
2555 DRM_ERROR("DX Context not set.\n");
2556 return -EINVAL;
2557 }
2558
2559 cmd = container_of(header, typeof(*cmd), header);
2560 num = (cmd->header.size - sizeof(cmd->body)) /
2561 sizeof(SVGA3dSoTarget);
2562
2563 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2564 DRM_ERROR("Invalid DX SO binding.\n");
2565 return -EINVAL;
2566 }
2567
2568 for (i = 0; i < num; i++) {
2569 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2570 user_surface_converter,
2571 &cmd->targets[i].sid, &res_node);
2572 if (unlikely(ret != 0))
2573 return ret;
2574
2575 binding.bi.ctx = ctx_node->res;
2576 binding.bi.res = ((res_node) ? res_node->res : NULL);
2577 binding.bi.bt = vmw_ctx_binding_so;
2578 binding.offset = cmd->targets[i].offset;
2579 binding.size = cmd->targets[i].sizeInBytes;
2580 binding.slot = i;
2581
2582 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2583 0, binding.slot);
2584 }
2585
2586 return 0;
2587}
2588
2589static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2590 struct vmw_sw_context *sw_context,
2591 SVGA3dCmdHeader *header)
2592{
2593 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2594 struct vmw_resource *res;
2595 /*
2596 * This is based on the fact that all affected define commands have
2597 * the same initial command body layout.
2598 */
2599 struct {
2600 SVGA3dCmdHeader header;
2601 uint32 defined_id;
2602 } *cmd;
2603 enum vmw_so_type so_type;
2604 int ret;
2605
2606 if (unlikely(ctx_node == NULL)) {
2607 DRM_ERROR("DX Context not set.\n");
2608 return -EINVAL;
2609 }
2610
2611 so_type = vmw_so_cmd_to_type(header->id);
2612 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2613 cmd = container_of(header, typeof(*cmd), header);
2614 ret = vmw_cotable_notify(res, cmd->defined_id);
2615 vmw_resource_unreference(&res);
2616
2617 return ret;
2618}
2619
2620/**
2621 * vmw_cmd_dx_check_subresource - Validate an
2622 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2623 *
2624 * @dev_priv: Pointer to a device private struct.
2625 * @sw_context: The software context being used for this batch.
2626 * @header: Pointer to the command header in the command stream.
2627 */
2628static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2629 struct vmw_sw_context *sw_context,
2630 SVGA3dCmdHeader *header)
2631{
2632 struct {
2633 SVGA3dCmdHeader header;
2634 union {
2635 SVGA3dCmdDXReadbackSubResource r_body;
2636 SVGA3dCmdDXInvalidateSubResource i_body;
2637 SVGA3dCmdDXUpdateSubResource u_body;
2638 SVGA3dSurfaceId sid;
2639 };
2640 } *cmd;
2641
2642 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2643 offsetof(typeof(*cmd), sid));
2644 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2645 offsetof(typeof(*cmd), sid));
2646 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2647 offsetof(typeof(*cmd), sid));
2648
2649 cmd = container_of(header, typeof(*cmd), header);
2650
2651 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2652 user_surface_converter,
2653 &cmd->sid, NULL);
2654}
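/*
 * The anonymous union plus BUILD_BUG_ON() above is a compile-time proof
 * that all three command bodies start with the surface id, which is
 * what lets one handler serve three commands. The same guard, reduced
 * to a free-standing sketch:
 */
struct vmw_sid_alias_sketch {
	union {
		SVGA3dCmdDXReadbackSubResource r_body;
		SVGA3dSurfaceId sid;
	};
};

static inline void vmw_sid_alias_check_sketch(void)
{
	/* Fails the build if the body's sid ever moves off offset 0. */
	BUILD_BUG_ON(offsetof(struct vmw_sid_alias_sketch, r_body.sid) !=
		     offsetof(struct vmw_sid_alias_sketch, sid));
}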
2655
2656static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2657 struct vmw_sw_context *sw_context,
2658 SVGA3dCmdHeader *header)
2659{
2660 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2661
2662 if (unlikely(ctx_node == NULL)) {
2663 DRM_ERROR("DX Context not set.\n");
2664 return -EINVAL;
2665 }
2666
2667 return 0;
2668}
2669
2670/**
2671 * vmw_cmd_dx_view_remove - Validate a view remove command and
2672 * schedule the view resource for removal.
2673 *
2674 * @dev_priv: Pointer to a device private struct.
2675 * @sw_context: The software context being used for this batch.
2676 * @header: Pointer to the command header in the command stream.
2677 *
2678 * Check that the view exists, and if it was not created using this
2679 * command batch, make sure it's validated (present in the device) so that
2680 * the remove command will not confuse the device.
2681 */
2682static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2683 struct vmw_sw_context *sw_context,
2684 SVGA3dCmdHeader *header)
2685{
2686 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2687 struct {
2688 SVGA3dCmdHeader header;
2689 union vmw_view_destroy body;
2690 } *cmd = container_of(header, typeof(*cmd), header);
2691 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2692 struct vmw_resource *view;
2693 int ret;
2694
2695 if (!ctx_node) {
2696 DRM_ERROR("DX Context not set.\n");
2697 return -EINVAL;
2698 }
2699
2700 ret = vmw_view_remove(sw_context->man,
2701 cmd->body.view_id, view_type,
2702 &sw_context->staged_cmd_res,
2703 &view);
2704 if (ret || !view)
2705 return ret;
2706
2707 /*
2708 * Add view to the validate list iff it was not created using this
2709 * command batch.
2710 */
2711 return vmw_view_res_val_add(sw_context, view);
2712}
2713
2714/**
2715 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2716 * command
2717 *
2718 * @dev_priv: Pointer to a device private struct.
2719 * @sw_context: The software context being used for this batch.
2720 * @header: Pointer to the command header in the command stream.
2721 */
2722static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2723 struct vmw_sw_context *sw_context,
2724 SVGA3dCmdHeader *header)
2725{
2726 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2727 struct vmw_resource *res;
2728 struct {
2729 SVGA3dCmdHeader header;
2730 SVGA3dCmdDXDefineShader body;
2731 } *cmd = container_of(header, typeof(*cmd), header);
2732 int ret;
2733
2734 if (!ctx_node) {
2735 DRM_ERROR("DX Context not set.\n");
2736 return -EINVAL;
2737 }
2738
2739 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2740 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2741 vmw_resource_unreference(&res);
2742 if (ret)
2743 return ret;
2744
2745 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2746 cmd->body.shaderId, cmd->body.type,
2747 &sw_context->staged_cmd_res);
2748}
2749
2750/**
2751 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2752 * command
2753 *
2754 * @dev_priv: Pointer to a device private struct.
2755 * @sw_context: The software context being used for this batch.
2756 * @header: Pointer to the command header in the command stream.
2757 */
2758static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2759 struct vmw_sw_context *sw_context,
2760 SVGA3dCmdHeader *header)
2761{
2762 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2763 struct {
2764 SVGA3dCmdHeader header;
2765 SVGA3dCmdDXDestroyShader body;
2766 } *cmd = container_of(header, typeof(*cmd), header);
2767 int ret;
2768
2769 if (!ctx_node) {
2770 DRM_ERROR("DX Context not set.\n");
2771 return -EINVAL;
2772 }
2773
2774 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2775 &sw_context->staged_cmd_res);
2776 if (ret)
2777 DRM_ERROR("Could not find shader to remove.\n");
2778
2779 return ret;
2780}
2781
2782/**
2783 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2784 * command
2785 *
2786 * @dev_priv: Pointer to a device private struct.
2787 * @sw_context: The software context being used for this batch.
2788 * @header: Pointer to the command header in the command stream.
2789 */
2790static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2791 struct vmw_sw_context *sw_context,
2792 SVGA3dCmdHeader *header)
2793{
2794 struct vmw_resource_val_node *ctx_node;
2795 struct vmw_resource_val_node *res_node;
2796 struct vmw_resource *res;
2797 struct {
2798 SVGA3dCmdHeader header;
2799 SVGA3dCmdDXBindShader body;
2800 } *cmd = container_of(header, typeof(*cmd), header);
2801 int ret;
2802
2803 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2804 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2805 user_context_converter,
2806 &cmd->body.cid, &ctx_node);
2807 if (ret)
2808 return ret;
2809 } else {
2810 ctx_node = sw_context->dx_ctx_node;
2811 if (!ctx_node) {
2812 DRM_ERROR("DX Context not set.\n");
2813 return -EINVAL;
2814 }
2815 }
2816
2817 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2818 cmd->body.shid, 0);
2819 if (IS_ERR(res)) {
2820 DRM_ERROR("Could not find shader to bind.\n");
2821 return PTR_ERR(res);
2822 }
2823
2824 ret = vmw_resource_val_add(sw_context, res, &res_node);
2825 if (ret) {
2826 DRM_ERROR("Error creating resource validation node.\n");
2827 goto out_unref;
2828 }
2829
2830
2831 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
2832 &cmd->body.mobid,
2833 cmd->body.offsetInBytes);
2834out_unref:
2835 vmw_resource_unreference(&res);
2836
2837 return ret;
2838}
2839
2840static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2841 struct vmw_sw_context *sw_context,
2842 void *buf, uint32_t *size)
2843{
2844 uint32_t size_remaining = *size;
2845 uint32_t cmd_id;
2846
2847 cmd_id = ((uint32_t *)buf)[0];
2848 switch (cmd_id) {
2849 case SVGA_CMD_UPDATE:
2850 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2851 break;
2852 case SVGA_CMD_DEFINE_GMRFB:
2853 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2854 break;
2855 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2856 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2857 break;
2858 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2859 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
2860 break;
2861 default:
2862 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
2863 return -EINVAL;
2864 }
2865
2866 if (*size > size_remaining) {
2867 DRM_ERROR("Invalid SVGA command (size mismatch):"
2868 " %u.\n", cmd_id);
2869 return -EINVAL;
2870 }
2871
2872 if (unlikely(!sw_context->kernel)) {
2873 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
2874 return -EPERM;
2875 }
2876
2877 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2878 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2879
2880 return 0;
2881}
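/*
 * Note the in/out contract on *size that both this function and
 * vmw_cmd_check() below honour: on entry it holds the bytes remaining
 * in the batch, on return the bytes this command actually consumed.
 * That contract is what lets vmw_cmd_check_all() advance through the
 * buffer one command at a time.
 */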
2882
2883static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2884 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2885 false, false, false),
2886 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2887 false, false, false),
2888 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2889 true, false, false),
2890 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2891 true, false, false),
2892 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2893 true, false, false),
2894 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2895 false, false, false),
2896 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2897 false, false, false),
2898 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2899 true, false, false),
2900 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2901 true, false, false),
2902 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2903 true, false, false),
2904 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2905 &vmw_cmd_set_render_target_check, true, false, false),
2906 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2907 true, false, false),
2908 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2909 true, false, false),
2910 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2911 true, false, false),
2912 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2913 true, false, false),
2914 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2915 true, false, false),
2916 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2917 true, false, false),
2918 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2919 true, false, false),
2920 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2921 false, false, false),
2922 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2923 true, false, false),
2924 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2925 true, false, false),
2926 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2927 true, false, false),
2928 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2929 true, false, false),
2930 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2931 true, false, false),
2932 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2933 true, false, false),
2934 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2935 true, false, false),
2936 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2937 true, false, false),
2938 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2939 true, false, false),
2940 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2941 true, false, false),
fb1d9738 2942 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2943 &vmw_cmd_blt_surf_screen_check, false, false, false),
2944 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2945 false, false, false),
2946 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2947 false, false, false),
2948 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2949 false, false, false),
2950 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2951 false, false, false),
2952 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2953 false, false, false),
2954 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
2955 false, false, false),
2956 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
2957 false, false, false),
2958 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2959 false, false, false),
2960 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2961 false, false, false),
2962 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2963 false, false, false),
2964 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2965 false, false, false),
2966 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2967 false, false, false),
2968 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2969 false, false, false),
2970 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2971 false, false, true),
2972 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2973 false, false, true),
2974 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2975 false, false, true),
2976 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2977 false, false, true),
2978 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2979 false, false, true),
2980 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2981 false, false, true),
2982 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2983 false, false, true),
2984 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2985 true, false, true),
2986 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2987 false, false, true),
2988 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2989 true, false, true),
a97e2192 2990 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
c373d4ea 2991 &vmw_cmd_update_gb_surface, true, false, true),
a97e2192 2992 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
c373d4ea 2993 &vmw_cmd_readback_gb_image, true, false, true),
a97e2192 2994 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
c373d4ea 2995 &vmw_cmd_readback_gb_surface, true, false, true),
a97e2192 2996 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
c373d4ea 2997 &vmw_cmd_invalidate_gb_image, true, false, true),
a97e2192 2998 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2999 &vmw_cmd_invalidate_gb_surface, true, false, true),
3000 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3001 false, false, true),
3002 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3003 false, false, true),
3004 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3005 false, false, true),
3006 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3007 false, false, true),
3008 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3009 false, false, true),
3010 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3011 false, false, true),
3012 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3013 true, false, true),
3014 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3015 false, false, true),
f2a0dcb1 3016 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
8ba07315 3017 false, false, false),
3018 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3019 true, false, true),
3020 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3021 true, false, true),
3022 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3023 true, false, true),
3024 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3025 true, false, true),
3026 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3027 false, false, true),
3028 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3029 false, false, true),
3030 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3031 false, false, true),
3032 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3033 false, false, true),
3034 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3035 false, false, true),
3036 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3037 false, false, true),
3038 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3039 false, false, true),
3040 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3041 false, false, true),
3042 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3043 false, false, true),
3044 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3045 false, false, true),
3046 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3047 true, false, true),
3048 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3049 false, false, true),
3050 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3051 false, false, true),
3052 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3053 false, false, true),
3054 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3055 false, false, true),
3056
3057 /*
3058 * DX commands
3059 */
3060 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3061 false, false, true),
3062 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3063 false, false, true),
3064 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3065 false, false, true),
3066 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3067 false, false, true),
3068 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3069 false, false, true),
3070 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3071 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3072 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3073 &vmw_cmd_dx_set_shader_res, true, false, true),
3074 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3075 true, false, true),
3076 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3077 true, false, true),
3078 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3079 true, false, true),
3080 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
d80efd5c 3081 true, false, true),
2f633e5e 3082 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
d80efd5c 3083 true, false, true),
3084 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3085 &vmw_cmd_dx_cid_check, true, false, true),
3086 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3087 true, false, true),
3088 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3089 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3090 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3091 &vmw_cmd_dx_set_index_buffer, true, false, true),
3092 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3093 &vmw_cmd_dx_set_rendertargets, true, false, true),
3094 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3095 true, false, true),
d80efd5c 3096 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3097 &vmw_cmd_dx_cid_check, true, false, true),
3098 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3099 &vmw_cmd_dx_cid_check, true, false, true),
3100 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid,
3101 true, false, true),
3102 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid,
3103 true, false, true),
3104 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid,
3105 true, false, true),
3106 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid,
3107 true, false, true),
3108 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid,
3109 true, false, true),
3110 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3111 true, false, true),
3112 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
3113 true, false, true),
3114 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3115 true, false, true),
3116 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3117 true, false, true),
3118 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3119 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3120 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3121 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3122 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3123 true, false, true),
3124 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3125 true, false, true),
3126 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3127 &vmw_cmd_dx_check_subresource, true, false, true),
3128 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3129 &vmw_cmd_dx_check_subresource, true, false, true),
3130 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3131 &vmw_cmd_dx_check_subresource, true, false, true),
3132 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3133 &vmw_cmd_dx_view_define, true, false, true),
3134 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3135 &vmw_cmd_dx_view_remove, true, false, true),
3136 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3137 &vmw_cmd_dx_view_define, true, false, true),
3138 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3139 &vmw_cmd_dx_view_remove, true, false, true),
3140 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3141 &vmw_cmd_dx_view_define, true, false, true),
3142 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3143 &vmw_cmd_dx_view_remove, true, false, true),
3144 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3145 &vmw_cmd_dx_so_define, true, false, true),
3146 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3147 &vmw_cmd_dx_cid_check, true, false, true),
3148 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3149 &vmw_cmd_dx_so_define, true, false, true),
3150 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3151 &vmw_cmd_dx_cid_check, true, false, true),
3152 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3153 &vmw_cmd_dx_so_define, true, false, true),
3154 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3155 &vmw_cmd_dx_cid_check, true, false, true),
3156 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3157 &vmw_cmd_dx_so_define, true, false, true),
3158 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3159 &vmw_cmd_dx_cid_check, true, false, true),
3160 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3161 &vmw_cmd_dx_so_define, true, false, true),
3162 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3163 &vmw_cmd_dx_cid_check, true, false, true),
3164 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3165 &vmw_cmd_dx_define_shader, true, false, true),
3166 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3167 &vmw_cmd_dx_destroy_shader, true, false, true),
3168 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3169 &vmw_cmd_dx_bind_shader, true, false, true),
3170 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3171 &vmw_cmd_dx_so_define, true, false, true),
3172 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3173 &vmw_cmd_dx_cid_check, true, false, true),
2f633e5e 3174 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
d80efd5c 3175 true, false, true),
3176 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3177 &vmw_cmd_dx_set_so_targets, true, false, true),
3178 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3179 &vmw_cmd_dx_cid_check, true, false, true),
3180 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3181 &vmw_cmd_dx_cid_check, true, false, true),
3182 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3183 &vmw_cmd_buffer_copy_check, true, false, true),
3184 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3185 &vmw_cmd_pred_copy_check, true, false, true),
3186};
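/*
 * The table above is indexed directly by (cmd_id - SVGA_3D_CMD_BASE);
 * each entry pairs a checker with three policy bits (user_allow,
 * gb_disable, gb_enable). The VMW_CMD_DEF macro, defined earlier in
 * this file, expands along these lines (sketch, not verbatim):
 *
 *	#define VMW_CMD_DEF(_cmd, _func, _user, _gbdis, _gben)	\
 *		[(_cmd) - SVGA_3D_CMD_BASE] =			\
 *			{(_func), (_user), (_gbdis), (_gben)}
 *
 * Designated initializers leave unlisted ids zeroed, so their NULL
 * ->func is caught by the !entry->func check in vmw_cmd_check().
 */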
3187
3188static int vmw_cmd_check(struct vmw_private *dev_priv,
3189 struct vmw_sw_context *sw_context,
3190 void *buf, uint32_t *size)
3191{
3192 uint32_t cmd_id;
7a73ba74 3193 uint32_t size_remaining = *size;
3194 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3195 int ret;
3196 const struct vmw_cmd_entry *entry;
3197 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
fb1d9738 3198
b9eb1a61 3199 cmd_id = ((uint32_t *)buf)[0];
3200 /* Handle any non-3D commands. */
3201 if (unlikely(cmd_id < SVGA_CMD_MAX))
3202 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3203
fb1d9738 3204
3205 cmd_id = header->id;
3206 *size = header->size + sizeof(SVGA3dCmdHeader);
3207
3208 cmd_id -= SVGA_3D_CMD_BASE;
7a73ba74 3209 if (unlikely(*size > size_remaining))
c373d4ea 3210 goto out_invalid;
7a73ba74 3211
fb1d9738 3212 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3213 goto out_invalid;
3214
3215 entry = &vmw_cmd_entries[cmd_id];
3216 if (unlikely(!entry->func))
3217 goto out_invalid;
3218
3219 if (unlikely(!entry->user_allow && !sw_context->kernel))
3220 goto out_privileged;
3221
3222 if (unlikely(entry->gb_disable && gb))
3223 goto out_old;
3224
3225 if (unlikely(entry->gb_enable && !gb))
3226 goto out_new;
fb1d9738 3227
c373d4ea 3228 ret = entry->func(dev_priv, sw_context, header);
fb1d9738 3229 if (unlikely(ret != 0))
c373d4ea 3230 goto out_invalid;
3231
3232 return 0;
3233out_invalid:
3234 DRM_ERROR("Invalid SVGA3D command: %d\n",
3235 cmd_id + SVGA_3D_CMD_BASE);
3236 return -EINVAL;
3237out_privileged:
3238 DRM_ERROR("Privileged SVGA3D command: %d\n",
3239 cmd_id + SVGA_3D_CMD_BASE);
3240 return -EPERM;
3241out_old:
3242 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3243 cmd_id + SVGA_3D_CMD_BASE);
3244 return -EINVAL;
3245out_new:
3246 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3247 cmd_id + SVGA_3D_CMD_BASE);
3248 return -EINVAL;
3249}
3250
3251static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3252 struct vmw_sw_context *sw_context,
922ade0d 3253 void *buf,
be38ab6e 3254 uint32_t size)
3255{
3256 int32_t cur_size = size;
3257 int ret;
3258
3259 sw_context->buf_start = buf;
3260
fb1d9738 3261 while (cur_size > 0) {
7a73ba74 3262 size = cur_size;
3263 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3264 if (unlikely(ret != 0))
3265 return ret;
3266 buf = (void *)((unsigned long) buf + size);
3267 cur_size -= size;
3268 }
3269
3270 if (unlikely(cur_size != 0)) {
3271 DRM_ERROR("Command verifier out of sync.\n");
3272 return -EINVAL;
3273 }
3274
3275 return 0;
3276}
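/*
 * A reduced model (not driver code) of the walk vmw_cmd_check_all()
 * performs: each checker reports the size it consumed, and the batch
 * is valid only if those sizes tile the buffer exactly.
 */
static int vmw_cmd_walk_sketch(void *buf, uint32_t total,
			       int (*check)(void *buf, uint32_t *size))
{
	int32_t cur_size = total;

	while (cur_size > 0) {
		uint32_t size = cur_size;
		int ret = check(buf, &size);

		if (ret)
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	return (cur_size == 0) ? 0 : -EINVAL;
}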
3277
3278static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3279{
3280 sw_context->cur_reloc = 0;
3281}
3282
3283static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3284{
3285 uint32_t i;
3286 struct vmw_relocation *reloc;
3287 struct ttm_validate_buffer *validate;
3288 struct ttm_buffer_object *bo;
3289
3290 for (i = 0; i < sw_context->cur_reloc; ++i) {
3291 reloc = &sw_context->relocs[i];
c0951b79 3292 validate = &sw_context->val_bufs[reloc->index].base;
fb1d9738 3293 bo = validate->bo;
3294 switch (bo->mem.mem_type) {
3295 case TTM_PL_VRAM:
3296 reloc->location->offset += bo->offset;
3297 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3298 break;
3299 case VMW_PL_GMR:
135cba0d 3300 reloc->location->gmrId = bo->mem.start;
c0951b79 3301 break;
3302 case VMW_PL_MOB:
3303 *reloc->mob_loc = bo->mem.start;
3304 break;
3305 default:
3306 BUG();
3307 }
3308 }
3309 vmw_free_relocations(sw_context);
3310}
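/*
 * Relocation semantics above, spelled out: a buffer validated into
 * VRAM is addressed through the special SVGA_GMR_FRAMEBUFFER id plus a
 * byte offset, a GMR-placed buffer contributes its GMR id, and a
 * MOB-placed buffer patches a 32-bit MOB id directly into the command
 * stream; anything else indicates a validation bug, hence the BUG().
 */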
3311
3312/**
3313 * vmw_resource_list_unreference - Free up a resource list and unreference
3314 * all resources referenced by it.
3315 *
3316 * @sw_context: The software context holding the staged binding state.
3317 * @list: The resource list.
3317 */
3318static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3319 struct list_head *list)
3320{
3321 struct vmw_resource_val_node *val, *val_next;
3322
3323 /*
3324 * Drop references to resources held during command submission.
3325 */
3326
3327 list_for_each_entry_safe(val, val_next, list, head) {
3328 list_del_init(&val->head);
3329 vmw_resource_unreference(&val->res);
3330
3331 if (val->staged_bindings) {
3332 if (val->staged_bindings != sw_context->staged_bindings)
3333 vmw_binding_state_free(val->staged_bindings);
3334 else
3335 sw_context->staged_bindings_inuse = false;
3336 val->staged_bindings = NULL;
3337 }
3338
3339 kfree(val);
3340 }
3341}
3342
3343static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3344{
3345 struct vmw_validate_buffer *entry, *next;
3346 struct vmw_resource_val_node *val;
fb1d9738 3347
3348 /*
3349 * Drop references to DMA buffers held during command submission.
3350 */
fb1d9738 3351 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3352 base.head) {
3353 list_del(&entry->base.head);
3354 ttm_bo_unref(&entry->base.bo);
3355 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3356 sw_context->cur_val_buf--;
3357 }
3358 BUG_ON(sw_context->cur_val_buf != 0);
be38ab6e 3359
c0951b79
TH
3360 list_for_each_entry(val, &sw_context->resource_list, head)
3361 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
fb1d9738
JB
3362}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}
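
/*
 * Illustrative sketch (not part of the driver): a caller that needs a
 * buffer placed as a MOB takes the early validate_as_mob branch above and
 * never enters the VRAM/GMR fallback ladder:
 */
#if 0	/* example only, never compiled */
	ret = vmw_validate_single_buffer(dev_priv, bo, true,
					 true /* validate_as_mob */);
#endif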

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
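
/*
 * Illustrative sketch (not part of the driver): the bounce buffer grows
 * geometrically, roughly 1.5x per step and rounded up to a page boundary.
 * Assuming a hypothetical 32 KiB initial size and 4 KiB pages, a request
 * for 100 KiB would walk 32K -> 48K -> 72K -> 108K before reallocating.
 */
#if 0	/* example only, never compiled */
static size_t example_bounce_growth(size_t cur, size_t needed)
{
	while (cur < needed)
		cur = PAGE_ALIGN(cur + (cur >> 1));	/* grow by ~1.5x */
	return cur;
}
#endif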

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the calling file, or NULL if no user-space
 * handle should be created.
 * @dev_priv: Pointer to a device private structure.
 * @p_fence: Output pointer to the created fence object.
 * @p_handle: If non-NULL, receives a user-space handle for the fence.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and hand back a NULL
 * fence in @p_fence. It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
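
/*
 * Illustrative caller sketch (not part of the driver): because a failed
 * fence submission syncs the fifo and hands back a NULL fence, callers
 * may fence buffers unconditionally with whatever is returned.
 */
#if 0	/* example only, never compiled */
static void example_fence_usage(struct vmw_private *dev_priv)
{
	struct vmw_fence_obj *fence;

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	/* ... fence buffer objects here; a NULL fence is safe ... */
	if (fence != NULL)
		vmw_fence_obj_unreference(&fence);
}
#endif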

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
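
/*
 * Illustrative user-space sketch (not part of the driver): a client can
 * pre-set fence_rep.error to -EFAULT so that a failed copy_to_user() in
 * the kernel shows up as an error value that was never overwritten.
 */
#if 0	/* example only, never compiled */
	struct drm_vmw_fence_rep fence_rep;

	memset(&fence_rep, 0, sizeof(fence_rep));
	fence_rep.error = -EFAULT;	/* sentinel; overwritten on success */
	/* ... submit, passing &fence_rep as the ioctl's fence_rep ... */
	if (fence_rep.error != 0) {
		/* Fence information never arrived; treat it as lost. */
	}
#endif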

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel copy of the commands, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return an error pointer
 * value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to an error pointer value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
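
/*
 * Illustrative caller sketch (not part of the driver): the three return
 * conventions documented above are disambiguated with IS_ERR() and the
 * value of *header, mirroring what vmw_execbuf_process() does below.
 */
#if 0	/* example only, never compiled */
	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);	/* hard failure */
	if (header) {
		/* Commands live in a cmdbuf; submit via the cmdbuf manager. */
	} else {
		/* Fifo path; kernel_commands may still be NULL here. */
	}
#endif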

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

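/**
 * vmw_execbuf_process - Validate, patch, submit and fence a command batch.
 *
 * @file_priv: Pointer to the file struct of the caller.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL if
 * @kernel_commands is supplied.
 * @kernel_commands: Kernel pointer to an already copied-in batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, throttle submission to this lag.
 * @dx_context_handle: Handle of a DX context to bind for the batch, or
 * SVGA3D_INVALID_ID for none.
 * @user_fence_rep: If non-NULL, user-space address to copy fence
 * information to.
 * @out_fence: If non-NULL, receives a pointer to the created fence object
 * instead of that reference being dropped.
 */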
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);

	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
				    false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
				    true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
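
/*
 * Illustrative user-space sketch (not part of the driver): a version 2
 * execbuf call passes a DX context handle via libdrm's drmCommandWrite();
 * version 1 callers omit the trailing members and the kernel substitutes
 * an invalid context handle.  cmd_buf, cmd_size, ctx_handle and fd are
 * hypothetical caller-provided values.
 */
#if 0	/* example only, never compiled */
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uintptr_t) cmd_buf;
	arg.command_size = cmd_size;
	arg.version = 2;		/* must be <= DRM_VMW_EXECBUF_VERSION */
	arg.context_handle = ctx_handle;	/* or (uint32_t) -1 for none */
	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
#endif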