drm/vmwgfx: Initial DX support
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4-byte entries, into the command buffer
 * where the id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

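/*
 * Usage sketch for VMW_CMD_DEF() (illustrative; the actual table entries
 * live further down in this file):
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 *			    false, false, false),
 *		...
 *	};
 *
 * The designated initializer indexes each entry by (_cmd - SVGA_3D_CMD_BASE),
 * so a command's verifier can be looked up in O(1) from its header id.
 */
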
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context,
					struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}
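
/*
 * Note on the interplay of the two functions above (inferred from the
 * code, not an original comment): the software context caches a single
 * vmw_ctx_binding_state and lends it to the first context seen in a
 * submission (staged_bindings_inuse). Any further contexts get a
 * private allocation, which vmw_resource_list_unreserve() frees again,
 * while the cached one is only marked available for reuse.
 */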

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success, negative error code
 * on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list. */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

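/*
 * Side note on the GNU "?:" extension used in vmw_cmd_invalid() above
 * (an observation, not an original comment): when the middle operand is
 * omitted, the expression evaluates to the condition itself, so a
 * CAP_SYS_ADMIN caller gets a return value of 1 rather than 0. Any
 * caller that treats a nonzero return as failure will therefore still
 * reject the command, even for privileged submitters.
 */
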
/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	if (p_val)
		*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

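/*
 * Implementation note (inferred from the code above, not an original
 * comment): the per-type res_cache holds only the most recently
 * looked-up handle, so the fastpath helps the common case of long runs
 * of commands referencing the same resource. Two handles of the same
 * type used alternately would fall back to the full lookup every time.
 */
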
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

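/*
 * Note on the loop above: a view id of SVGA3D_INVALID_ID is deliberately
 * passed through with view == NULL, recording an unbind of that slot in
 * the staged binding state rather than failing the command.
 */
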
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

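/*
 * The two translate helpers above differ only in which relocation field
 * they fill in (reloc->mob_loc vs. reloc->location) and in whether the
 * buffer is validated as a MOB; the deferred patching itself is common
 * to both and, as their comments note, happens in vmw_apply_relocations().
 */
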
/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

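/*
 * Pattern note (inferred from the code, not an original comment): on
 * guest-backed hardware, the legacy query commands in this file are
 * rewritten in place into their *_GB_* equivalents and re-dispatched,
 * as above. This is only safe because the legacy and guest-backed
 * command structs are the same size, which the BUG_ON asserts before
 * memcpy() overwrites the command in the stream.
 */
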
/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

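/*
 * Note on the bounds handling above (an observation on the code, not an
 * original comment): an out-of-range suffix->maximumOffset is silently
 * clamped to the buffer size rather than rejected, so a malformed
 * SurfaceDMA command is defanged while the submission as a whole still
 * succeeds.
 */
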
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

1717/**
1718 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1719 * command
1720 *
1721 * @dev_priv: Pointer to a device private struct.
1722 * @sw_context: The software context being used for this batch.
1723 * @header: Pointer to the command header in the command stream.
1724 */
1725static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1726 struct vmw_sw_context *sw_context,
1727 SVGA3dCmdHeader *header)
1728{
1729 struct vmw_bind_gb_surface_cmd {
1730 SVGA3dCmdHeader header;
1731 SVGA3dCmdBindGBSurface body;
1732 } *cmd;
1733
1734 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1735
1736 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1737 user_surface_converter,
1738 &cmd->body.sid, &cmd->body.mobid,
1739 0);
1740}
1741
1742/**
1743 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1744 * command
1745 *
1746 * @dev_priv: Pointer to a device private struct.
1747 * @sw_context: The software context being used for this batch.
1748 * @header: Pointer to the command header in the command stream.
1749 */
1750static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1751 struct vmw_sw_context *sw_context,
1752 SVGA3dCmdHeader *header)
1753{
1754 struct vmw_gb_surface_cmd {
1755 SVGA3dCmdHeader header;
1756 SVGA3dCmdUpdateGBImage body;
1757 } *cmd;
1758
1759 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1760
1761 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1762 user_surface_converter,
1763 &cmd->body.image.sid, NULL);
1764}
1765
1766/**
1767 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1768 * command
1769 *
1770 * @dev_priv: Pointer to a device private struct.
1771 * @sw_context: The software context being used for this batch.
1772 * @header: Pointer to the command header in the command stream.
1773 */
1774static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1775 struct vmw_sw_context *sw_context,
1776 SVGA3dCmdHeader *header)
1777{
1778 struct vmw_gb_surface_cmd {
1779 SVGA3dCmdHeader header;
1780 SVGA3dCmdUpdateGBSurface body;
1781 } *cmd;
1782
1783 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1784
1785 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1786 user_surface_converter,
1787 &cmd->body.sid, NULL);
1788}
1789
1790/**
1791 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1792 * command
1793 *
1794 * @dev_priv: Pointer to a device private struct.
1795 * @sw_context: The software context being used for this batch.
1796 * @header: Pointer to the command header in the command stream.
1797 */
1798static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1799 struct vmw_sw_context *sw_context,
1800 SVGA3dCmdHeader *header)
1801{
1802 struct vmw_gb_surface_cmd {
1803 SVGA3dCmdHeader header;
1804 SVGA3dCmdReadbackGBImage body;
1805 } *cmd;
1806
1807 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1808
1809 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1810 user_surface_converter,
1811 &cmd->body.image.sid, NULL);
1812}
1813
1814/**
1815 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1816 * command
1817 *
1818 * @dev_priv: Pointer to a device private struct.
1819 * @sw_context: The software context being used for this batch.
1820 * @header: Pointer to the command header in the command stream.
1821 */
1822static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1823 struct vmw_sw_context *sw_context,
1824 SVGA3dCmdHeader *header)
1825{
1826 struct vmw_gb_surface_cmd {
1827 SVGA3dCmdHeader header;
1828 SVGA3dCmdReadbackGBSurface body;
1829 } *cmd;
1830
1831 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1832
1833 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1834 user_surface_converter,
1835 &cmd->body.sid, NULL);
1836}
1837
1838/**
1839 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1840 * command
1841 *
1842 * @dev_priv: Pointer to a device private struct.
1843 * @sw_context: The software context being used for this batch.
1844 * @header: Pointer to the command header in the command stream.
1845 */
1846static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1847 struct vmw_sw_context *sw_context,
1848 SVGA3dCmdHeader *header)
1849{
1850 struct vmw_gb_surface_cmd {
1851 SVGA3dCmdHeader header;
1852 SVGA3dCmdInvalidateGBImage body;
1853 } *cmd;
1854
1855 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1856
1857 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1858 user_surface_converter,
1859 &cmd->body.image.sid, NULL);
1860}
1861
1862/**
1863 * vmw_cmd_invalidate_gb_surface - Validate an
1864 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1865 *
1866 * @dev_priv: Pointer to a device private struct.
1867 * @sw_context: The software context being used for this batch.
1868 * @header: Pointer to the command header in the command stream.
1869 */
1870static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1871 struct vmw_sw_context *sw_context,
1872 SVGA3dCmdHeader *header)
1873{
1874 struct vmw_gb_surface_cmd {
1875 SVGA3dCmdHeader header;
1876 SVGA3dCmdInvalidateGBSurface body;
1877 } *cmd;
1878
1879 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1880
1881 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1882 user_surface_converter,
1883 &cmd->body.sid, NULL);
1884}
1885
1886
1887/**
1888 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1889 * command
1890 *
1891 * @dev_priv: Pointer to a device private struct.
1892 * @sw_context: The software context being used for this batch.
1893 * @header: Pointer to the command header in the command stream.
1894 */
1895static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1896 struct vmw_sw_context *sw_context,
1897 SVGA3dCmdHeader *header)
1898{
1899 struct vmw_shader_define_cmd {
1900 SVGA3dCmdHeader header;
1901 SVGA3dCmdDefineShader body;
1902 } *cmd;
1903 int ret;
1904 size_t size;
1905	struct vmw_resource_val_node *val;
1906
1907	cmd = container_of(header, struct vmw_shader_define_cmd,
1908			   header);
1909
1910	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1911				user_context_converter, &cmd->body.cid,
1912				&val);
1913 if (unlikely(ret != 0))
1914 return ret;
1915
1916 if (unlikely(!dev_priv->has_mob))
1917 return 0;
1918
1919 size = cmd->header.size - sizeof(cmd->body);
1920	ret = vmw_compat_shader_add(dev_priv,
1921				    vmw_context_res_man(val->res),
1922				    cmd->body.shid, cmd + 1,
1923				    cmd->body.type, size,
1924				    &sw_context->staged_cmd_res);
1925 if (unlikely(ret != 0))
1926 return ret;
1927
1928 return vmw_resource_relocation_add(&sw_context->res_relocations,
1929 NULL, &cmd->header.id -
1930 sw_context->buf_start);
1933}
1934
1935/**
1936 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1937 * command
1938 *
1939 * @dev_priv: Pointer to a device private struct.
1940 * @sw_context: The software context being used for this batch.
1941 * @header: Pointer to the command header in the command stream.
1942 */
1943static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1944 struct vmw_sw_context *sw_context,
1945 SVGA3dCmdHeader *header)
1946{
1947 struct vmw_shader_destroy_cmd {
1948 SVGA3dCmdHeader header;
1949 SVGA3dCmdDestroyShader body;
1950 } *cmd;
1951 int ret;
1952	struct vmw_resource_val_node *val;
1953
1954	cmd = container_of(header, struct vmw_shader_destroy_cmd,
1955			   header);
1956
1957	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1958				user_context_converter, &cmd->body.cid,
1959				&val);
1960 if (unlikely(ret != 0))
1961 return ret;
1962
1963 if (unlikely(!dev_priv->has_mob))
1964 return 0;
1965
1966 ret = vmw_shader_remove(vmw_context_res_man(val->res),
1967 cmd->body.shid,
1968 cmd->body.type,
1969 &sw_context->staged_cmd_res);
1970 if (unlikely(ret != 0))
1971 return ret;
1972
1973 return vmw_resource_relocation_add(&sw_context->res_relocations,
1974 NULL, &cmd->header.id -
1975 sw_context->buf_start);
1978}
1979
1980/**
1981 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1982 * command
1983 *
1984 * @dev_priv: Pointer to a device private struct.
1985 * @sw_context: The software context being used for this batch.
1986 * @header: Pointer to the command header in the command stream.
1987 */
1988static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1989 struct vmw_sw_context *sw_context,
1990 SVGA3dCmdHeader *header)
1991{
1992 struct vmw_set_shader_cmd {
1993 SVGA3dCmdHeader header;
1994 SVGA3dCmdSetShader body;
1995 } *cmd;
1996	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1997	struct vmw_ctx_bindinfo_shader binding;
1998	struct vmw_resource *res = NULL;
1999 int ret;
2000
2001 cmd = container_of(header, struct vmw_set_shader_cmd,
2002 header);
2003
2004 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2005 DRM_ERROR("Illegal shader type %u.\n",
2006 (unsigned) cmd->body.type);
2007 return -EINVAL;
2008 }
2009
2010 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2011 user_context_converter, &cmd->body.cid,
2012 &ctx_node);
2013 if (unlikely(ret != 0))
2014 return ret;
2015
2016 if (!dev_priv->has_mob)
2017 return 0;
2018
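	/*
	 * Legacy shaders may have been converted to per-context managed
	 * shaders (see vmw_compat_shader_add()); look in the context's
	 * resource manager first, then fall back to a user-space shader
	 * object below.
	 */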
2019 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2020 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2021 cmd->body.shid,
2022 cmd->body.type);
2023
2024 if (!IS_ERR(res)) {
2025 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2026 &cmd->body.shid, res,
2027 &res_node);
2028 vmw_resource_unreference(&res);
2029 if (unlikely(ret != 0))
2030 return ret;
2031 }
2032 }
2033
2034 if (!res_node) {
2035 ret = vmw_cmd_res_check(dev_priv, sw_context,
2036 vmw_res_shader,
2037 user_shader_converter,
2038 &cmd->body.shid, &res_node);
2039		if (unlikely(ret != 0))
2040			return ret;
2041	}
2042
2043 binding.bi.ctx = ctx_node->res;
2044 binding.bi.res = res_node ? res_node->res : NULL;
2045 binding.bi.bt = vmw_ctx_binding_shader;
2046 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2047 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2048 binding.shader_slot, 0);
2049 return 0;
2050}
2051
2052/**
2053 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2054 * command
2055 *
2056 * @dev_priv: Pointer to a device private struct.
2057 * @sw_context: The software context being used for this batch.
2058 * @header: Pointer to the command header in the command stream.
2059 */
2060static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2061 struct vmw_sw_context *sw_context,
2062 SVGA3dCmdHeader *header)
2063{
2064 struct vmw_set_shader_const_cmd {
2065 SVGA3dCmdHeader header;
2066 SVGA3dCmdSetShaderConst body;
2067 } *cmd;
2068 int ret;
2069
2070 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2071 header);
2072
2073 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2074 user_context_converter, &cmd->body.cid,
2075 NULL);
2076 if (unlikely(ret != 0))
2077 return ret;
2078
2079 if (dev_priv->has_mob)
2080 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2081
2082 return 0;
2083}
2084
2085/**
2086 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2087 * command
2088 *
2089 * @dev_priv: Pointer to a device private struct.
2090 * @sw_context: The software context being used for this batch.
2091 * @header: Pointer to the command header in the command stream.
2092 */
2093static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2094 struct vmw_sw_context *sw_context,
2095 SVGA3dCmdHeader *header)
2096{
2097 struct vmw_bind_gb_shader_cmd {
2098 SVGA3dCmdHeader header;
2099 SVGA3dCmdBindGBShader body;
2100 } *cmd;
2101
2102 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2103 header);
2104
2105 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2106 user_shader_converter,
2107 &cmd->body.shid, &cmd->body.mobid,
2108 cmd->body.offsetInBytes);
2109}
2110
2111/**
2112 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2113 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2114 *
2115 * @dev_priv: Pointer to a device private struct.
2116 * @sw_context: The software context being used for this batch.
2117 * @header: Pointer to the command header in the command stream.
2118 */
2119static int
2120vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2121 struct vmw_sw_context *sw_context,
2122 SVGA3dCmdHeader *header)
2123{
2124 struct {
2125 SVGA3dCmdHeader header;
2126 SVGA3dCmdDXSetSingleConstantBuffer body;
2127 } *cmd;
2128 struct vmw_resource_val_node *res_node = NULL;
2129 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2130 struct vmw_ctx_bindinfo_cb binding;
2131 int ret;
2132
2133 if (unlikely(ctx_node == NULL)) {
2134 DRM_ERROR("DX Context not set.\n");
2135 return -EINVAL;
2136 }
2137
2138 cmd = container_of(header, typeof(*cmd), header);
2139 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2140 user_surface_converter,
2141 &cmd->body.sid, &res_node);
2142 if (unlikely(ret != 0))
2143 return ret;
2144
2145 binding.bi.ctx = ctx_node->res;
2146 binding.bi.res = res_node ? res_node->res : NULL;
2147 binding.bi.bt = vmw_ctx_binding_cb;
2148 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2149 binding.offset = cmd->body.offsetInBytes;
2150 binding.size = cmd->body.sizeInBytes;
2151 binding.slot = cmd->body.slot;
2152
2153 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2154 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2155 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2156 (unsigned) cmd->body.type,
2157 (unsigned) binding.slot);
2158 return -EINVAL;
2159 }
2160
2161 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2162 binding.shader_slot, binding.slot);
2163
2164 return 0;
2165}
2166
2167/**
2168 * vmw_cmd_dx_set_shader_res - Validate an
2169 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2170 *
2171 * @dev_priv: Pointer to a device private struct.
2172 * @sw_context: The software context being used for this batch.
2173 * @header: Pointer to the command header in the command stream.
2174 */
2175static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2176 struct vmw_sw_context *sw_context,
2177 SVGA3dCmdHeader *header)
2178{
2179 struct {
2180 SVGA3dCmdHeader header;
2181 SVGA3dCmdDXSetShaderResources body;
2182 } *cmd = container_of(header, typeof(*cmd), header);
2183 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2184 sizeof(SVGA3dShaderResourceViewId);
2185
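	/* Do the range check in 64 bits so startView + count cannot wrap. */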
2186 if ((u64) cmd->body.startView + (u64) num_sr_view >
2187 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2188 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2189 DRM_ERROR("Invalid shader binding.\n");
2190 return -EINVAL;
2191 }
2192
2193 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2194 vmw_ctx_binding_sr,
2195 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2196 (void *) &cmd[1], num_sr_view,
2197 cmd->body.startView);
2198}
2199
2200/**
2201 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2202 * command
2203 *
2204 * @dev_priv: Pointer to a device private struct.
2205 * @sw_context: The software context being used for this batch.
2206 * @header: Pointer to the command header in the command stream.
2207 */
2208static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2209 struct vmw_sw_context *sw_context,
2210 SVGA3dCmdHeader *header)
2211{
2212 struct {
2213 SVGA3dCmdHeader header;
2214 SVGA3dCmdDXSetShader body;
2215 } *cmd;
2216 struct vmw_resource *res = NULL;
2217 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2218 struct vmw_ctx_bindinfo_shader binding;
2219 int ret = 0;
2220
2221 if (unlikely(ctx_node == NULL)) {
2222 DRM_ERROR("DX Context not set.\n");
2223 return -EINVAL;
2224 }
2225
2226 cmd = container_of(header, typeof(*cmd), header);
2227
2228 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2229 DRM_ERROR("Illegal shader type %u.\n",
2230 (unsigned) cmd->body.type);
2231 return -EINVAL;
2232 }
2233
2234 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2235 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2236 if (IS_ERR(res)) {
2237 DRM_ERROR("Could not find shader for binding.\n");
2238 return PTR_ERR(res);
2239 }
2240
2241 ret = vmw_resource_val_add(sw_context, res, NULL);
2242 if (ret)
2243 goto out_unref;
2244 }
2245
2246 binding.bi.ctx = ctx_node->res;
2247 binding.bi.res = res;
2248 binding.bi.bt = vmw_ctx_binding_dx_shader;
2249 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2250
2251 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2252 binding.shader_slot, 0);
2253out_unref:
2254 if (res)
2255 vmw_resource_unreference(&res);
2256
2257 return ret;
2258}
2259
2260/**
2261 * vmw_cmd_dx_set_vertex_buffers - Validates an
2262 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2263 *
2264 * @dev_priv: Pointer to a device private struct.
2265 * @sw_context: The software context being used for this batch.
2266 * @header: Pointer to the command header in the command stream.
2267 */
2268static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2269 struct vmw_sw_context *sw_context,
2270 SVGA3dCmdHeader *header)
2271{
2272 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2273 struct vmw_ctx_bindinfo_vb binding;
2274 struct vmw_resource_val_node *res_node;
2275 struct {
2276 SVGA3dCmdHeader header;
2277 SVGA3dCmdDXSetVertexBuffers body;
2278 SVGA3dVertexBuffer buf[];
2279 } *cmd;
2280 int i, ret, num;
2281
2282 if (unlikely(ctx_node == NULL)) {
2283 DRM_ERROR("DX Context not set.\n");
2284 return -EINVAL;
2285 }
2286
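	/*
	 * A variable-sized array of SVGA3dVertexBuffer entries follows
	 * the fixed command body; derive its element count from the
	 * command header size.
	 */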
2287 cmd = container_of(header, typeof(*cmd), header);
2288 num = (cmd->header.size - sizeof(cmd->body)) /
2289 sizeof(SVGA3dVertexBuffer);
2290 if ((u64)num + (u64)cmd->body.startBuffer >
2291 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2292 DRM_ERROR("Invalid number of vertex buffers.\n");
2293 return -EINVAL;
2294 }
2295
2296 for (i = 0; i < num; i++) {
2297 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2298 user_surface_converter,
2299 &cmd->buf[i].sid, &res_node);
2300 if (unlikely(ret != 0))
2301 return ret;
2302
2303 binding.bi.ctx = ctx_node->res;
2304 binding.bi.bt = vmw_ctx_binding_vb;
2305 binding.bi.res = ((res_node) ? res_node->res : NULL);
2306 binding.offset = cmd->buf[i].offset;
2307 binding.stride = cmd->buf[i].stride;
2308 binding.slot = i + cmd->body.startBuffer;
2309
2310 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2311 0, binding.slot);
2312 }
2313
2314 return 0;
2315}
2316
2317/**
2318 * vmw_cmd_dx_set_index_buffer - Validate an
2319 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2320 *
2321 * @dev_priv: Pointer to a device private struct.
2322 * @sw_context: The software context being used for this batch.
2323 * @header: Pointer to the command header in the command stream.
2324 */
2325static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2326 struct vmw_sw_context *sw_context,
2327 SVGA3dCmdHeader *header)
2328{
2329 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2330 struct vmw_ctx_bindinfo_ib binding;
2331 struct vmw_resource_val_node *res_node;
2332 struct {
2333 SVGA3dCmdHeader header;
2334 SVGA3dCmdDXSetIndexBuffer body;
2335 } *cmd;
2336 int ret;
2337
2338 if (unlikely(ctx_node == NULL)) {
2339 DRM_ERROR("DX Context not set.\n");
2340 return -EINVAL;
2341 }
2342
2343 cmd = container_of(header, typeof(*cmd), header);
2344 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2345 user_surface_converter,
2346 &cmd->body.sid, &res_node);
2347 if (unlikely(ret != 0))
2348 return ret;
2349
2350 binding.bi.ctx = ctx_node->res;
2351 binding.bi.res = ((res_node) ? res_node->res : NULL);
2352 binding.bi.bt = vmw_ctx_binding_ib;
2353 binding.offset = cmd->body.offset;
2354 binding.format = cmd->body.format;
2355
2356 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2357
2358 return 0;
2359}
2360
2361/**
2362 * vmw_cmd_dx_set_rendertargets - Validate an
2363 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2364 *
2365 * @dev_priv: Pointer to a device private struct.
2366 * @sw_context: The software context being used for this batch.
2367 * @header: Pointer to the command header in the command stream.
2368 */
2369static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2370 struct vmw_sw_context *sw_context,
2371 SVGA3dCmdHeader *header)
2372{
2373 struct {
2374 SVGA3dCmdHeader header;
2375 SVGA3dCmdDXSetRenderTargets body;
2376 } *cmd = container_of(header, typeof(*cmd), header);
2377 int ret;
2378 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2379 sizeof(SVGA3dRenderTargetViewId);
2380
2381 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2382 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2383 return -EINVAL;
2384 }
2385
2386 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2387 vmw_ctx_binding_ds, 0,
2388 &cmd->body.depthStencilViewId, 1, 0);
2389 if (ret)
2390 return ret;
2391
2392 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2393 vmw_ctx_binding_dx_rt, 0,
2394 (void *)&cmd[1], num_rt_view, 0);
2395}
2396
2397/**
2398 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2399 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2400 *
2401 * @dev_priv: Pointer to a device private struct.
2402 * @sw_context: The software context being used for this batch.
2403 * @header: Pointer to the command header in the command stream.
2404 */
2405static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2406 struct vmw_sw_context *sw_context,
2407 SVGA3dCmdHeader *header)
2408{
2409 struct {
2410 SVGA3dCmdHeader header;
2411 SVGA3dCmdDXClearRenderTargetView body;
2412 } *cmd = container_of(header, typeof(*cmd), header);
2413
2414 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2415 cmd->body.renderTargetViewId);
2416}
2417
2418/**
2419 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2420 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2421 *
2422 * @dev_priv: Pointer to a device private struct.
2423 * @sw_context: The software context being used for this batch.
2424 * @header: Pointer to the command header in the command stream.
2425 */
2426static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2427 struct vmw_sw_context *sw_context,
2428 SVGA3dCmdHeader *header)
2429{
2430 struct {
2431 SVGA3dCmdHeader header;
2432 SVGA3dCmdDXClearDepthStencilView body;
2433 } *cmd = container_of(header, typeof(*cmd), header);
2434
2435 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2436 cmd->body.depthStencilViewId);
2437}
2438
2439static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2440 struct vmw_sw_context *sw_context,
2441 SVGA3dCmdHeader *header)
2442{
2443 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2444 struct vmw_resource_val_node *srf_node;
2445 struct vmw_resource *res;
2446 enum vmw_view_type view_type;
2447 int ret;
2448 /*
2449 * This is based on the fact that all affected define commands have
2450 * the same initial command body layout.
2451 */
2452 struct {
2453 SVGA3dCmdHeader header;
2454 uint32 defined_id;
2455 uint32 sid;
2456 } *cmd;
2457
2458 if (unlikely(ctx_node == NULL)) {
2459 DRM_ERROR("DX Context not set.\n");
2460 return -EINVAL;
2461 }
2462
2463 view_type = vmw_view_cmd_to_type(header->id);
2464 cmd = container_of(header, typeof(*cmd), header);
2465 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2466 user_surface_converter,
2467 &cmd->sid, &srf_node);
2468 if (unlikely(ret != 0))
2469 return ret;
2470
2471 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2472 ret = vmw_cotable_notify(res, cmd->defined_id);
2473 vmw_resource_unreference(&res);
2474 if (unlikely(ret != 0))
2475 return ret;
2476
2477 return vmw_view_add(sw_context->man,
2478 ctx_node->res,
2479 srf_node->res,
2480 view_type,
2481 cmd->defined_id,
2482 header,
2483 header->size + sizeof(*header),
2484 &sw_context->staged_cmd_res);
2485}
2486
2487static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2488 struct vmw_sw_context *sw_context,
2489 SVGA3dCmdHeader *header)
2490{
2491 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2492 struct vmw_resource *res;
2493 /*
2494 * This is based on the fact that all affected define commands have
2495 * the same initial command body layout.
2496 */
2497 struct {
2498 SVGA3dCmdHeader header;
2499 uint32 defined_id;
2500 } *cmd;
2501 enum vmw_so_type so_type;
2502 int ret;
2503
2504 if (unlikely(ctx_node == NULL)) {
2505 DRM_ERROR("DX Context not set.\n");
2506 return -EINVAL;
2507 }
2508
2509 so_type = vmw_so_cmd_to_type(header->id);
2510 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2511 cmd = container_of(header, typeof(*cmd), header);
2512 ret = vmw_cotable_notify(res, cmd->defined_id);
2513 vmw_resource_unreference(&res);
2514
2515 return ret;
2516}
2517
2518/**
2519 * vmw_cmd_dx_check_subresource - Validate an
2520 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2521 *
2522 * @dev_priv: Pointer to a device private struct.
2523 * @sw_context: The software context being used for this batch.
2524 * @header: Pointer to the command header in the command stream.
2525 */
2526static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2527 struct vmw_sw_context *sw_context,
2528 SVGA3dCmdHeader *header)
2529{
2530 struct {
2531 SVGA3dCmdHeader header;
2532 union {
2533 SVGA3dCmdDXReadbackSubResource r_body;
2534 SVGA3dCmdDXInvalidateSubResource i_body;
2535 SVGA3dCmdDXUpdateSubResource u_body;
2536 SVGA3dSurfaceId sid;
2537 };
2538 } *cmd;
2539
2540 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2541 offsetof(typeof(*cmd), sid));
2542 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2543 offsetof(typeof(*cmd), sid));
2544 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2545 offsetof(typeof(*cmd), sid));
2546
2547 cmd = container_of(header, typeof(*cmd), header);
2548
2549 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2550 user_surface_converter,
2551 &cmd->sid, NULL);
2552}
2553
2554static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2555 struct vmw_sw_context *sw_context,
2556 SVGA3dCmdHeader *header)
2557{
2558 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2559
2560 if (unlikely(ctx_node == NULL)) {
2561 DRM_ERROR("DX Context not set.\n");
2562 return -EINVAL;
2563 }
2564
2565 return 0;
2566}
2567
2568/**
2569 * vmw_cmd_dx_view_remove - validate a view remove command and
2570 * schedule the view resource for removal.
2571 *
2572 * @dev_priv: Pointer to a device private struct.
2573 * @sw_context: The software context being used for this batch.
2574 * @header: Pointer to the command header in the command stream.
2575 *
2576 * Check that the view exists, and if it was not created using this
2577 * command batch, make sure it's validated (present in the device) so that
2578 * the remove command will not confuse the device.
2579 */
2580static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2581 struct vmw_sw_context *sw_context,
2582 SVGA3dCmdHeader *header)
2583{
2584 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2585 struct {
2586 SVGA3dCmdHeader header;
2587 union vmw_view_destroy body;
2588 } *cmd = container_of(header, typeof(*cmd), header);
2589 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2590 struct vmw_resource *view;
2591 int ret;
2592
2593 if (!ctx_node) {
2594 DRM_ERROR("DX Context not set.\n");
2595 return -EINVAL;
2596 }
2597
2598 ret = vmw_view_remove(sw_context->man,
2599 cmd->body.view_id, view_type,
2600 &sw_context->staged_cmd_res,
2601 &view);
2602 if (ret || !view)
2603 return ret;
2604
2605 /*
2606 * Add view to the validate list iff it was not created using this
2607 * command batch.
2608 */
2609 return vmw_view_res_val_add(sw_context, view);
2610}
2611
2612/**
2613 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2614 * command
2615 *
2616 * @dev_priv: Pointer to a device private struct.
2617 * @sw_context: The software context being used for this batch.
2618 * @header: Pointer to the command header in the command stream.
2619 */
2620static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2621 struct vmw_sw_context *sw_context,
2622 SVGA3dCmdHeader *header)
2623{
2624 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2625 struct vmw_resource *res;
2626 struct {
2627 SVGA3dCmdHeader header;
2628 SVGA3dCmdDXDefineShader body;
2629 } *cmd = container_of(header, typeof(*cmd), header);
2630 int ret;
2631
2632 if (!ctx_node) {
2633 DRM_ERROR("DX Context not set.\n");
2634 return -EINVAL;
2635 }
2636
2637 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2638 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2639 vmw_resource_unreference(&res);
2640 if (ret)
2641 return ret;
2642
2643 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2644 cmd->body.shaderId, cmd->body.type,
2645 &sw_context->staged_cmd_res);
2646}
2647
2648/**
2649 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2650 * command
2651 *
2652 * @dev_priv: Pointer to a device private struct.
2653 * @sw_context: The software context being used for this batch.
2654 * @header: Pointer to the command header in the command stream.
2655 */
2656static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2657 struct vmw_sw_context *sw_context,
2658 SVGA3dCmdHeader *header)
2659{
2660 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2661 struct {
2662 SVGA3dCmdHeader header;
2663 SVGA3dCmdDXDestroyShader body;
2664 } *cmd = container_of(header, typeof(*cmd), header);
2665 int ret;
2666
2667 if (!ctx_node) {
2668 DRM_ERROR("DX Context not set.\n");
2669 return -EINVAL;
2670 }
2671
2672 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2673 &sw_context->staged_cmd_res);
2674 if (ret)
2675 DRM_ERROR("Could not find shader to remove.\n");
2676
2677 return ret;
2678}
2679
2680/**
2681 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2682 * command
2683 *
2684 * @dev_priv: Pointer to a device private struct.
2685 * @sw_context: The software context being used for this batch.
2686 * @header: Pointer to the command header in the command stream.
2687 */
2688static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2689 struct vmw_sw_context *sw_context,
2690 SVGA3dCmdHeader *header)
2691{
2692 struct vmw_resource_val_node *ctx_node;
2693 struct vmw_resource_val_node *res_node;
2694 struct vmw_resource *res;
2695 struct {
2696 SVGA3dCmdHeader header;
2697 SVGA3dCmdDXBindShader body;
2698 } *cmd = container_of(header, typeof(*cmd), header);
2699 int ret;
2700
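	/*
	 * The command may name an explicit context, or apply to the DX
	 * context currently set on the software context.
	 */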
2701 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2702 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2703 user_context_converter,
2704 &cmd->body.cid, &ctx_node);
2705 if (ret)
2706 return ret;
2707 } else {
2708 ctx_node = sw_context->dx_ctx_node;
2709 if (!ctx_node) {
2710 DRM_ERROR("DX Context not set.\n");
2711 return -EINVAL;
2712 }
2713 }
2714
2715 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2716 cmd->body.shid, 0);
2717 if (IS_ERR(res)) {
2718 DRM_ERROR("Could not find shader to bind.\n");
2719 return PTR_ERR(res);
2720 }
2721
2722 ret = vmw_resource_val_add(sw_context, res, &res_node);
2723 if (ret) {
2724 DRM_ERROR("Error creating resource validation node.\n");
2725 goto out_unref;
2726 }
2727
2728
2729 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
2730 &cmd->body.mobid,
2731 cmd->body.offsetInBytes);
2732out_unref:
2733 vmw_resource_unreference(&res);
2734
2735 return ret;
2736}
2737
2738static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2739 struct vmw_sw_context *sw_context,
2740 void *buf, uint32_t *size)
2741{
2742 uint32_t size_remaining = *size;
2743 uint32_t cmd_id;
2744
2745 cmd_id = ((uint32_t *)buf)[0];
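	/*
	 * Non-3D FIFO commands carry no generic size field, so the size
	 * of each supported command must be known here.
	 */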
2746 switch (cmd_id) {
2747 case SVGA_CMD_UPDATE:
2748 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2749 break;
2750 case SVGA_CMD_DEFINE_GMRFB:
2751 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2752 break;
2753 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2754 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2755 break;
2756 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2757		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
2758 break;
2759 default:
2760 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
2761 return -EINVAL;
2762 }
2763
2764 if (*size > size_remaining) {
2765 DRM_ERROR("Invalid SVGA command (size mismatch):"
2766 " %u.\n", cmd_id);
2767 return -EINVAL;
2768 }
2769
2770 if (unlikely(!sw_context->kernel)) {
2771 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
2772 return -EPERM;
2773 }
2774
2775 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2776 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2777
2778 return 0;
2779}
2780
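/*
 * Command dispatch table. The boolean triple following each handler
 * appears to correspond to the user_allow, gb_disable and gb_enable
 * entry flags tested in vmw_cmd_check() below.
 */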
2781static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2782 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2783 false, false, false),
2784 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2785 false, false, false),
2786 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2787 true, false, false),
2788 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2789 true, false, false),
2790 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2791 true, false, false),
2792 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2793 false, false, false),
2794 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2795 false, false, false),
2796 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2797 true, false, false),
2798 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2799 true, false, false),
2800 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2801 true, false, false),
2802 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2803 &vmw_cmd_set_render_target_check, true, false, false),
2804 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2805 true, false, false),
2806 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2807 true, false, false),
2808 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2809 true, false, false),
2810 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2811 true, false, false),
2812 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2813 true, false, false),
2814 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2815 true, false, false),
2816 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2817 true, false, false),
2818 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2819 false, false, false),
2820 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2821 true, false, false),
2822 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2823 true, false, false),
2824 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2825 true, false, false),
2826 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2827 true, false, false),
2828 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2829 true, false, false),
2830 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2831 true, false, false),
2832 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2833 true, false, false),
2834 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2835 true, false, false),
2836 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2837 true, false, false),
2838 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2839 true, false, false),
2840	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2841 &vmw_cmd_blt_surf_screen_check, false, false, false),
2842 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2843 false, false, false),
2844 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2845 false, false, false),
2846 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2847 false, false, false),
2848 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2849 false, false, false),
2850 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2851 false, false, false),
2852 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
2853 false, false, false),
2854 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
2855 false, false, false),
2856 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2857 false, false, false),
2858 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2859 false, false, false),
2860 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2861 false, false, false),
2862 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2863 false, false, false),
2864 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2865 false, false, false),
2866 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2867 false, false, false),
2868 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2869 false, false, true),
2870 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2871 false, false, true),
2872 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2873 false, false, true),
2874 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2875 false, false, true),
2876 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2877 false, false, true),
2878 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2879 false, false, true),
2880 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2881 false, false, true),
2882 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2883 true, false, true),
2884 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2885 false, false, true),
2886 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2887 true, false, true),
2888	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2889		    &vmw_cmd_update_gb_surface, true, false, true),
2890	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2891		    &vmw_cmd_readback_gb_image, true, false, true),
2892	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2893		    &vmw_cmd_readback_gb_surface, true, false, true),
2894	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2895		    &vmw_cmd_invalidate_gb_image, true, false, true),
2896	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2897 &vmw_cmd_invalidate_gb_surface, true, false, true),
2898 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2899 false, false, true),
2900 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2901 false, false, true),
2902 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2903 false, false, true),
2904 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2905 false, false, true),
2906 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2907 false, false, true),
2908 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2909 false, false, true),
2910 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2911 true, false, true),
2912 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2913 false, false, true),
2914	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2915		    false, false, false),
2916 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2917 true, false, true),
2918 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2919 true, false, true),
2920 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2921 true, false, true),
2922 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2923 true, false, true),
2924 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2925 false, false, true),
2926 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2927 false, false, true),
2928 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2929 false, false, true),
2930 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2931 false, false, true),
2932 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2933 false, false, true),
2934 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2935 false, false, true),
2936 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2937 false, false, true),
2938 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2939 false, false, true),
2940 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2941 false, false, true),
2942 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2943 false, false, true),
2944 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2945 true, false, true),
2946 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
2947 false, false, true),
2948 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
2949 false, false, true),
2950 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
2951 false, false, true),
2952 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
2953 false, false, true),
2954
2955 /*
2956 * DX commands
2957 */
2958 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
2959 false, false, true),
2960 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
2961 false, false, true),
2962 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
2963 false, false, true),
2964 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
2965 false, false, true),
2966 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
2967 false, false, true),
2968 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
2969 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
2970 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
2971 &vmw_cmd_dx_set_shader_res, true, false, true),
2972 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
2973 true, false, true),
2974 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_invalid,
2975 true, false, true),
2976 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, &vmw_cmd_invalid,
2977 true, false, true),
2978 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_invalid,
2979 true, false, true),
2980 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
2981 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
2982 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
2983 &vmw_cmd_dx_set_index_buffer, true, false, true),
2984 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
2985 &vmw_cmd_dx_set_rendertargets, true, false, true),
2986 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
2987 true, false, true),
2988 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, &vmw_cmd_dx_cid_check,
2989 true, false, true),
2990 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
2991 &vmw_cmd_dx_cid_check,
2992 true, false, true),
2993 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid,
2994 true, false, true),
2995 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid,
2996 true, false, true),
2997 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid,
2998 true, false, true),
2999 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid,
3000 true, false, true),
3001 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid,
3002 true, false, true),
3003 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3004 true, false, true),
3005 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
3006 true, false, true),
3007 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3008 true, false, true),
3009 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3010 true, false, true),
3011 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3012 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3013 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3014 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3015 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_invalid,
3016 true, false, true),
3017 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3018 true, false, true),
3019 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3020 true, false, true),
3021 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3022 &vmw_cmd_dx_check_subresource, true, false, true),
3023 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3024 &vmw_cmd_dx_check_subresource, true, false, true),
3025 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3026 &vmw_cmd_dx_check_subresource, true, false, true),
3027 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3028 &vmw_cmd_dx_view_define, true, false, true),
3029 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3030 &vmw_cmd_dx_view_remove, true, false, true),
3031 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3032 &vmw_cmd_dx_view_define, true, false, true),
3033 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3034 &vmw_cmd_dx_view_remove, true, false, true),
3035 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3036 &vmw_cmd_dx_view_define, true, false, true),
3037 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3038 &vmw_cmd_dx_view_remove, true, false, true),
3039 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3040 &vmw_cmd_dx_so_define, true, false, true),
3041 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3042 &vmw_cmd_dx_cid_check, true, false, true),
3043 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3044 &vmw_cmd_dx_so_define, true, false, true),
3045 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3046 &vmw_cmd_dx_cid_check, true, false, true),
3047 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3048 &vmw_cmd_dx_so_define, true, false, true),
3049 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3050 &vmw_cmd_dx_cid_check, true, false, true),
3051 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3052 &vmw_cmd_dx_so_define, true, false, true),
3053 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3054 &vmw_cmd_dx_cid_check, true, false, true),
3055 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3056 &vmw_cmd_dx_so_define, true, false, true),
3057 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3058 &vmw_cmd_dx_cid_check, true, false, true),
3059 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3060 &vmw_cmd_dx_define_shader, true, false, true),
3061 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3062 &vmw_cmd_dx_destroy_shader, true, false, true),
3063 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3064 &vmw_cmd_dx_bind_shader, true, false, true),
3065 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3066 &vmw_cmd_dx_so_define, true, false, true),
3067 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3068 &vmw_cmd_dx_cid_check, true, false, true),
3069 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_invalid,
3070 true, false, true),
3071 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3072 &vmw_cmd_dx_cid_check, true, false, true),
3073 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3074 &vmw_cmd_dx_cid_check, true, false, true),
3075};
3076
3077static int vmw_cmd_check(struct vmw_private *dev_priv,
3078 struct vmw_sw_context *sw_context,
3079 void *buf, uint32_t *size)
3080{
3081 uint32_t cmd_id;
3082	uint32_t size_remaining = *size;
3083 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3084 int ret;
3085 const struct vmw_cmd_entry *entry;
3086 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3087
3088	cmd_id = ((uint32_t *)buf)[0];
3089	/* Handle any non-3D commands. */
3090 if (unlikely(cmd_id < SVGA_CMD_MAX))
3091 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3092
3093
3094 cmd_id = header->id;
3095 *size = header->size + sizeof(SVGA3dCmdHeader);
3096
3097 cmd_id -= SVGA_3D_CMD_BASE;
3098	if (unlikely(*size > size_remaining))
3099		goto out_invalid;
3100
3101	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3102 goto out_invalid;
3103
3104 entry = &vmw_cmd_entries[cmd_id];
3105 if (unlikely(!entry->func))
3106 goto out_invalid;
3107
3108 if (unlikely(!entry->user_allow && !sw_context->kernel))
3109 goto out_privileged;
3110
3111 if (unlikely(entry->gb_disable && gb))
3112 goto out_old;
3113
3114 if (unlikely(entry->gb_enable && !gb))
3115 goto out_new;
3116
3117	ret = entry->func(dev_priv, sw_context, header);
3118	if (unlikely(ret != 0))
3119		goto out_invalid;
3120
3121 return 0;
3122out_invalid:
3123 DRM_ERROR("Invalid SVGA3D command: %d\n",
3124 cmd_id + SVGA_3D_CMD_BASE);
3125 return -EINVAL;
3126out_privileged:
3127 DRM_ERROR("Privileged SVGA3D command: %d\n",
3128 cmd_id + SVGA_3D_CMD_BASE);
3129 return -EPERM;
3130out_old:
3131 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3132 cmd_id + SVGA_3D_CMD_BASE);
3133 return -EINVAL;
3134out_new:
3135 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3136 cmd_id + SVGA_3D_CMD_BASE);
3137 return -EINVAL;
3138}
3139
3140static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3141 struct vmw_sw_context *sw_context,
3142			     void *buf,
3143			     uint32_t size)
3144{
3145 int32_t cur_size = size;
3146 int ret;
3147
3148 sw_context->buf_start = buf;
3149
3150	while (cur_size > 0) {
3151		size = cur_size;
3152 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3153 if (unlikely(ret != 0))
3154 return ret;
3155 buf = (void *)((unsigned long) buf + size);
3156 cur_size -= size;
3157 }
3158
3159 if (unlikely(cur_size != 0)) {
3160 DRM_ERROR("Command verifier out of sync.\n");
3161 return -EINVAL;
3162 }
3163
3164 return 0;
3165}
3166
3167static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3168{
3169 sw_context->cur_reloc = 0;
3170}
3171
3172static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3173{
3174 uint32_t i;
3175 struct vmw_relocation *reloc;
3176 struct ttm_validate_buffer *validate;
3177 struct ttm_buffer_object *bo;
3178
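	/*
	 * All buffers have been placed by now; patch the recorded
	 * relocations in the command stream with the final GMR ids,
	 * MOB ids or VRAM offsets.
	 */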
3179 for (i = 0; i < sw_context->cur_reloc; ++i) {
3180 reloc = &sw_context->relocs[i];
3181		validate = &sw_context->val_bufs[reloc->index].base;
3182		bo = validate->bo;
3183 switch (bo->mem.mem_type) {
3184 case TTM_PL_VRAM:
3185 reloc->location->offset += bo->offset;
3186 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3187 break;
3188 case VMW_PL_GMR:
3189			reloc->location->gmrId = bo->mem.start;
3190			break;
3191 case VMW_PL_MOB:
3192 *reloc->mob_loc = bo->mem.start;
3193 break;
3194 default:
3195 BUG();
3196 }
3197 }
3198 vmw_free_relocations(sw_context);
3199}
3200
3201/**
3202 * vmw_resource_list_unreference - Free up a resource list and unreference
3203 * all resources referenced by it.
3204 *
3205 * @list: The resource list.
3206 */
3207static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3208 struct list_head *list)
3209{
3210 struct vmw_resource_val_node *val, *val_next;
3211
3212 /*
3213 * Drop references to resources held during command submission.
3214 */
3215
3216 list_for_each_entry_safe(val, val_next, list, head) {
3217 list_del_init(&val->head);
3218 vmw_resource_unreference(&val->res);
3219
3220 if (val->staged_bindings) {
3221 if (val->staged_bindings != sw_context->staged_bindings)
3222 vmw_binding_state_free(val->staged_bindings);
3223 else
3224 sw_context->staged_bindings_inuse = false;
3225 val->staged_bindings = NULL;
3226 }
3227
3228 kfree(val);
3229 }
3230}
3231
3232static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3233{
3234	struct vmw_validate_buffer *entry, *next;
3235	struct vmw_resource_val_node *val;
3236
3237 /*
3238 * Drop references to DMA buffers held during command submission.
3239 */
3240	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3241 base.head) {
3242 list_del(&entry->base.head);
3243 ttm_bo_unref(&entry->base.bo);
3244 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3245 sw_context->cur_val_buf--;
3246 }
3247 BUG_ON(sw_context->cur_val_buf != 0);
3248
3249 list_for_each_entry(val, &sw_context->resource_list, head)
3250 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3251}
3252
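/**
 * vmw_validate_single_buffer - Validate a single buffer object for
 * command submission.
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @interruptible: Whether waits should be performed interruptible.
 * @validate_as_mob: Whether the buffer backs a MOB and should be
 * validated for MOB placement.
 */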
3253int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3254 struct ttm_buffer_object *bo,
3255 bool interruptible,
3256 bool validate_as_mob)
3257{
3258 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3259 base);
fb1d9738
JB
3260 int ret;
3261
3262	if (vbo->pin_count > 0)
3263 return 0;
3264
3265	if (validate_as_mob)
3266 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3267 false);
3268
3269	/*
3270 * Put BO in VRAM if there is space, otherwise as a GMR.
3271 * If there is no space in VRAM and GMR ids are all used up,
3272 * start evicting GMRs to make room. If the DMA buffer can't be
3273 * used as a GMR, this will return -ENOMEM.
3274 */
3275
3276 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3277 false);
3278	if (likely(ret == 0 || ret == -ERESTARTSYS))
3279 return ret;
3280
3281	/*
3282 * If that failed, try VRAM again, this time evicting
3283 * previous contents.
3284 */
3285
3286	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3287 return ret;
3288}
3289
3290static int vmw_validate_buffers(struct vmw_private *dev_priv,
3291 struct vmw_sw_context *sw_context)
3292{
3293	struct vmw_validate_buffer *entry;
3294 int ret;
3295
3296	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3297		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3298						 true,
3299						 entry->validate_as_mob);
3300 if (unlikely(ret != 0))
3301 return ret;
3302 }
3303 return 0;
3304}
3305
3306static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3307 uint32_t size)
3308{
3309 if (likely(sw_context->cmd_bounce_size >= size))
3310 return 0;
3311
3312 if (sw_context->cmd_bounce_size == 0)
3313 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3314
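	/* Grow geometrically (~1.5x, page aligned) to limit reallocations. */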
3315 while (sw_context->cmd_bounce_size < size) {
3316 sw_context->cmd_bounce_size =
3317 PAGE_ALIGN(sw_context->cmd_bounce_size +
3318 (sw_context->cmd_bounce_size >> 1));
3319 }
3320
3321 if (sw_context->cmd_bounce != NULL)
3322 vfree(sw_context->cmd_bounce);
3323
3324 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3325
3326 if (sw_context->cmd_bounce == NULL) {
3327 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3328 sw_context->cmd_bounce_size = 0;
3329 return -ENOMEM;
3330 }
3331
3332 return 0;
3333}
3334
3335/**
3336 * vmw_execbuf_fence_commands - create and submit a command stream fence
3337 *
3338 * Creates a fence object and submits a command stream marker.
3339 * If this fails for some reason, we sync the fifo and return NULL.
3340 * It is then safe to fence buffers with a NULL pointer.
3341 *
3342 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
3343 * a userspace handle if @p_handle is not NULL, otherwise not.
3344 */
3345
3346int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3347 struct vmw_private *dev_priv,
3348 struct vmw_fence_obj **p_fence,
3349 uint32_t *p_handle)
3350{
3351 uint32_t sequence;
3352 int ret;
3353 bool synced = false;
3354
3355 /* p_handle implies file_priv. */
3356 BUG_ON(p_handle != NULL && file_priv == NULL);
3357
3358 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3359 if (unlikely(ret != 0)) {
3360 DRM_ERROR("Fence submission error. Syncing.\n");
3361 synced = true;
3362 }
3363
3364 if (p_handle != NULL)
3365 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
c060a4e1 3366 sequence, p_fence, p_handle);
ae2a1040 3367 else
c060a4e1 3368 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
ae2a1040
TH
3369
3370 if (unlikely(ret != 0 && !synced)) {
3371 (void) vmw_fallback_wait(dev_priv, false, false,
3372 sequence, false,
3373 VMW_FENCE_WAIT_TIMEOUT);
3374 *p_fence = NULL;
3375 }
3376
3377 return 0;
3378}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is left untouched;
 * if user-space has preloaded it with -EFAULT, the failed copy will be
 * detected.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
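
/*
 * Illustrative sketch (not part of the driver): the user-space side of
 * the -EFAULT convention described above. drmIoctl() is from libdrm;
 * DRM_IOCTL_VMW_EXECBUF and the argument structs are from the vmwgfx
 * uapi header. fd, cmd_buf, cmd_size and wait_on_fence() are
 * hypothetical; setup and error handling are elided. Kept in #if 0.
 */
#if 0
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	/* Preload so a failed kernel copy_to_user() is detectable. */
	fence_rep.error = -EFAULT;
	arg.fence_rep = (unsigned long) &fence_rep;
	arg.commands = (unsigned long) cmd_buf;
	arg.command_size = cmd_size;
	arg.version = 1;

	if (drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg) == 0 &&
	    fence_rep.error == 0)
		wait_on_fence(fence_rep.handle);	/* hypothetical */
#endif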

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to an already-present kernel-side command
 * batch, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);
	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
				    false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
				    true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}
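
/*
 * Illustrative sketch (not part of the driver): a hypothetical in-kernel
 * caller of vmw_execbuf_process(). A kernel-built batch is passed via
 * @kernel_commands, no DX context is bound, and no fence is reported to
 * user-space or handed back. Kept in #if 0.
 */
#if 0
static int example_submit_kernel_batch(struct vmw_private *dev_priv,
				       struct drm_file *file_priv,
				       void *cmds, uint32_t size)
{
	return vmw_execbuf_process(file_priv, dev_priv, NULL, cmds, size,
				   0, SVGA3D_INVALID_ID, NULL, NULL);
}
#endif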

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
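
/*
 * Illustrative sketch (not part of the driver): the query-barrier use
 * described in the comment above. example_destroy_hw_context() is
 * hypothetical; it flushes pending queries before a hardware context
 * goes away. Kept in #if 0.
 */
#if 0
static void example_destroy_hw_context(struct vmw_private *dev_priv)
{
	vmw_execbuf_release_pinned_bo(dev_priv);	/* query barrier */
	/* ... then emit the context-destroy command ... */
}
#endif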

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: we take different code paths depending on
	 * the value of arg.version.
	 */

	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
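
/*
 * Illustrative sketch (not part of the driver): the versioned argument
 * scheme above as seen from user-space. A version 1 client ends its
 * argument at the pre-DX fields; a version 2 client also passes a DX
 * context handle and must leave the padding zeroed. fd, cmd_buf,
 * cmd_size and ctx_handle are hypothetical. Kept in #if 0.
 */
#if 0
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));		/* keeps pad64 zero */
	arg.commands = (unsigned long) cmd_buf;
	arg.command_size = cmd_size;
	arg.version = 2;			/* DRM_VMW_EXECBUF_VERSION here */
	arg.context_handle = ctx_handle;	/* or SVGA3D_INVALID_ID */

	drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
#endif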