drm/vmwgfx: Detect old user-space drivers and set up legacy emulation v2
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Verifier function for this command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled only if guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

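/*
 * For illustration: with the macro above, a table entry such as
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, true, false, false)
 *
 * expands to the designated initializer
 *
 *	[SVGA_3D_CMD_SURFACE_DMA - SVGA_3D_CMD_BASE] =
 *		{&vmw_cmd_dma, true, false, false},
 *
 * so vmw_cmd_entries[] (defined at the end of this file) can be indexed
 * directly by (cmd_id - SVGA_3D_CMD_BASE) when dispatching a verifier.
 */
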
/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			}
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}
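
/*
 * Note on the res == NULL case above: such relocations are registered
 * with the offset of the command header id itself (see for example
 * vmw_cmd_shader_define() below), so patching in SVGA_3D_CMD_NOP turns
 * the whole command into a no-op in the patched buffer.
 */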

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put
 * it on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: user-space resource id handle.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 enum vmw_res_type res_type,
			 const struct vmw_user_resource_conv *converter,
			 uint32_t id,
			 uint32_t *id_loc,
			 struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (id == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) id);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;

	if (node->first_usage && res_type == vmw_res_context) {
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = -ENOMEM;
			goto out_no_reloc;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
					converter, *id_loc, id_loc, p_val);
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}
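
/*
 * Sketch of how the two helpers above are intended to pair up during a
 * submission (ordering only; not additional functionality):
 *
 *	vmw_query_bo_switch_prepare()	- while validating the command stream
 *	<submit the command batch>
 *	<emit query barrier / fence>
 *	vmw_query_bo_switch_commit()	- unpin old, pin new query buffer
 *
 * As the kernel-doc above stresses, both query buffers must be fenced
 * with a sequence emitted *after* vmw_query_bo_switch_commit().
 */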

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
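
/*
 * Note that the two translate helpers above only *record* what to patch:
 * reloc->mob_loc for MOB ids, reloc->location for guest pointers, plus
 * the buffer's index on the validate list. The actual id or address is
 * filled in later by vmw_apply_relocations(), once the buffers have been
 * validated, as the kernel-doc above describes.
 */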

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
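
/*
 * vmw_cmd_begin_query(), vmw_cmd_end_query() and vmw_cmd_wait_query()
 * share a pattern: on guest-backed (has_mob) devices the legacy command
 * is rewritten in place into its GB equivalent. The
 * BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)) checks guarantee the in-place
 * memcpy cannot overrun the original command, after which validation is
 * re-dispatched to the corresponding GB handler.
 */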

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}
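
/*
 * The switch recorded above in val_node->new_backup / new_backup_offset
 * is deliberately deferred: vmw_resource_list_unreserve() hands it to
 * vmw_resource_unreserve() only after the command batch has been
 * submitted (or drops it on backoff).
 */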

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(sw_context->fp->shman,
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    sw_context->fp->tfile,
				    &sw_context->staged_shaders);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_compat_shader_remove(sw_context->fp->shman,
				       cmd->body.shid,
				       cmd->body.type,
				       &sw_context->staged_shaders);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;
		struct vmw_resource_val_node *res_node;
		u32 shid = cmd->body.shid;

		(void) vmw_compat_shader_lookup(sw_context->fp->shman,
						cmd->body.type,
						&shid);

		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
					       vmw_res_shader,
					       user_shader_converter,
					       shid,
					       &cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_shader;
		bi.i1.shader_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}
1723
4084fb89
JB
1724static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1725 struct vmw_sw_context *sw_context,
1726 void *buf, uint32_t *size)
1727{
1728 uint32_t size_remaining = *size;
4084fb89
JB
1729 uint32_t cmd_id;
1730
1731 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1732 switch (cmd_id) {
1733 case SVGA_CMD_UPDATE:
1734 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
4084fb89
JB
1735 break;
1736 case SVGA_CMD_DEFINE_GMRFB:
1737 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1738 break;
1739 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1740 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1741 break;
1742 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1743 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1744 break;
1745 default:
1746 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1747 return -EINVAL;
1748 }
1749
1750 if (*size > size_remaining) {
1751 DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
1752 cmd_id);
1753 return -EINVAL;
1754 }
1755
1756 if (unlikely(!sw_context->kernel)) {
1757 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1758 return -EPERM;
1759 }
1760
1761 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1762 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1763
1764 return 0;
1765}
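/*
 * Illustrative sketch (assumes the SVGAFifoCmdUpdate layout from
 * svga_reg.h): a non-3D fifo command is a bare 32-bit command id
 * followed immediately by a fixed-size payload, which is what the
 * per-command size computations above rely on:
 *
 *	uint32_t *cmd = buf;
 *	SVGAFifoCmdUpdate *body = (SVGAFifoCmdUpdate *) &cmd[1];
 *
 *	cmd[0] = SVGA_CMD_UPDATE;
 *	body->x = 0;
 *	body->y = 0;
 *	body->width = 640;
 *	body->height = 480;
 *	// total size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate)
 */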
1766
1767 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1768 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1769 false, false, false),
1770 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1771 false, false, false),
1772 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1773 true, false, false),
1774 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1775 true, false, false),
1776 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1777 true, false, false),
1778 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1779 false, false, false),
1780 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1781 false, false, false),
1782 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1783 true, false, false),
1784 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1785 true, false, false),
1786 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1787 true, false, false),
1788 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1789 &vmw_cmd_set_render_target_check, true, false, false),
1790 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1791 true, false, false),
1792 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1793 true, false, false),
1794 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1795 true, false, false),
1796 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1797 true, false, false),
1798 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1799 true, false, false),
1800 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1801 true, false, false),
1802 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1803 true, false, false),
1804 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1805 false, false, false),
1806 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1807 true, false, false),
1808 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1809 true, false, false),
1810 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1811 true, false, false),
1812 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1813 true, false, false),
1814 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1815 true, false, false),
1816 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1817 true, false, false),
1818 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1819 true, false, false),
1820 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1821 true, false, false),
1822 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1823 true, false, false),
1824 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1825 true, false, false),
1826 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1827 &vmw_cmd_blt_surf_screen_check, false, false, false),
1828 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1829 false, false, false),
1830 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1831 false, false, false),
1832 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1833 false, false, false),
1834 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1835 false, false, false),
1836 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1837 false, false, false),
1838 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1839 false, false, false),
1840 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1841 false, false, false),
1842 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1843 false, false, false),
1844 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1845 false, false, false),
1846 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1847 false, false, false),
1848 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1849 false, false, false),
1850 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1851 false, false, false),
1852 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1853 false, false, false),
1854 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1855 false, false, true),
1856 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1857 false, false, true),
1858 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1859 false, false, true),
1860 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1861 false, false, true),
1862 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1863 false, false, true),
1864 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1865 false, false, true),
1866 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1867 false, false, true),
1868 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1869 false, false, true),
1870 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1871 true, false, true),
1872 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1873 false, false, true),
1874 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1875 true, false, true),
1876 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1877 &vmw_cmd_update_gb_surface, true, false, true),
1878 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1879 &vmw_cmd_readback_gb_image, true, false, true),
1880 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1881 &vmw_cmd_readback_gb_surface, true, false, true),
1882 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1883 &vmw_cmd_invalidate_gb_image, true, false, true),
1884 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1885 &vmw_cmd_invalidate_gb_surface, true, false, true),
1886 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
1887 false, false, true),
1888 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
1889 false, false, true),
1890 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
1891 false, false, true),
1892 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
1893 false, false, true),
1894 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
1895 false, false, true),
1896 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
1897 false, false, true),
1898 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
1899 true, false, true),
1900 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
1901 false, false, true),
1902 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
1903 false, false, false),
1904 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
1905 true, false, true),
1906 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
1907 true, false, true),
1908 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
1909 true, false, true),
1910 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
1911 true, false, true),
1912 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
1913 false, false, true),
1914 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
1915 false, false, true),
1916 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
1917 false, false, true),
1918 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
1919 false, false, true),
1920 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
1921 false, false, true),
1922 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
1923 false, false, true),
1924 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
1925 false, false, true),
1926 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
1927 false, false, true),
1928 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1929 false, false, true),
1930 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1931 false, false, true),
1932 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
1933 true, false, true)
1934};
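/*
 * Illustrative note: VMW_CMD_DEF, defined earlier in this file, indexes
 * the table by command id and fills in a struct vmw_cmd_entry, roughly:
 *
 *	#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
 *		[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow), \
 *					       (_gb_disable), (_gb_enable)}
 *
 * so the SVGA_3D_CMD_SET_SHADER entry above, for example, maps to
 * vmw_cmd_set_shader, is allowed from the execbuf ioctl (user_allow),
 * is not disabled when guest-backed objects are available (gb_disable
 * is false) and does not require them either (gb_enable is false).
 */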
1935
1936static int vmw_cmd_check(struct vmw_private *dev_priv,
1937 struct vmw_sw_context *sw_context,
1938 void *buf, uint32_t *size)
1939{
1940 uint32_t cmd_id;
1941 uint32_t size_remaining = *size;
1942 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
1943 int ret;
1944 const struct vmw_cmd_entry *entry;
1945 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
1946
1947 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1948 /* Handle any non-3D commands */
1949 if (unlikely(cmd_id < SVGA_CMD_MAX))
1950 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
1951
1952
1953 cmd_id = le32_to_cpu(header->id);
1954 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
1955
1956 cmd_id -= SVGA_3D_CMD_BASE;
1957 if (unlikely(*size > size_remaining))
1958 goto out_invalid;
1959
1960 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
1961 goto out_invalid;
1962
1963 entry = &vmw_cmd_entries[cmd_id];
1964 if (unlikely(!entry->user_allow && !sw_context->kernel))
1965 goto out_privileged;
1966
1967 if (unlikely(entry->gb_disable && gb))
1968 goto out_old;
1969
1970 if (unlikely(entry->gb_enable && !gb))
1971 goto out_new;
1972
1973 ret = entry->func(dev_priv, sw_context, header);
1974 if (unlikely(ret != 0))
1975 goto out_invalid;
1976
1977 return 0;
1978out_invalid:
1979 DRM_ERROR("Invalid SVGA3D command: %d\n",
1980 cmd_id + SVGA_3D_CMD_BASE);
1981 return -EINVAL;
1982out_privileged:
1983 DRM_ERROR("Privileged SVGA3D command: %d\n",
1984 cmd_id + SVGA_3D_CMD_BASE);
1985 return -EPERM;
1986out_old:
1987 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
1988 cmd_id + SVGA_3D_CMD_BASE);
1989 return -EINVAL;
1990out_new:
1991 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
1992 cmd_id + SVGA_3D_CMD_BASE);
1993 return -EINVAL;
1994}
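/*
 * Illustrative sketch of the 3D command framing that vmw_cmd_check()
 * parses (assumes the SVGA3dCmdHeader layout { uint32 id; uint32 size; }):
 *
 *	SVGA3dCmdHeader *header = buf;
 *	// The size in the header covers the body only ...
 *	uint32_t body_size = le32_to_cpu(header->size);
 *	// ... so the verifier advances by body plus header:
 *	uint32_t total = body_size + sizeof(SVGA3dCmdHeader);
 *	// and the entry table is indexed relative to the 3D base:
 *	uint32_t idx = le32_to_cpu(header->id) - SVGA_3D_CMD_BASE;
 */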
1995
1996static int vmw_cmd_check_all(struct vmw_private *dev_priv,
1997 struct vmw_sw_context *sw_context,
1998 void *buf,
1999 uint32_t size)
2000{
2001 int32_t cur_size = size;
2002 int ret;
2003
2004 sw_context->buf_start = buf;
2005
2006 while (cur_size > 0) {
2007 size = cur_size;
2008 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2009 if (unlikely(ret != 0))
2010 return ret;
2011 buf = (void *)((unsigned long) buf + size);
2012 cur_size -= size;
2013 }
2014
2015 if (unlikely(cur_size != 0)) {
2016 DRM_ERROR("Command verifier out of sync.\n");
2017 return -EINVAL;
2018 }
2019
2020 return 0;
2021}
2022
2023static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2024{
2025 sw_context->cur_reloc = 0;
2026}
2027
2028static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2029{
2030 uint32_t i;
2031 struct vmw_relocation *reloc;
2032 struct ttm_validate_buffer *validate;
2033 struct ttm_buffer_object *bo;
2034
2035 for (i = 0; i < sw_context->cur_reloc; ++i) {
2036 reloc = &sw_context->relocs[i];
2037 validate = &sw_context->val_bufs[reloc->index].base;
2038 bo = validate->bo;
2039 switch (bo->mem.mem_type) {
2040 case TTM_PL_VRAM:
2041 reloc->location->offset += bo->offset;
2042 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2043 break;
2044 case VMW_PL_GMR:
2045 reloc->location->gmrId = bo->mem.start;
2046 break;
2047 case VMW_PL_MOB:
2048 *reloc->mob_loc = bo->mem.start;
2049 break;
2050 default:
2051 BUG();
2052 }
2053 }
2054 vmw_free_relocations(sw_context);
2055}
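/*
 * Illustrative note: each relocation records a pointer straight into
 * the kernel copy of the command stream, so once the buffers have been
 * placed, patching is a direct store. For a buffer that ended up in a
 * GMR, conceptually:
 *
 *	SVGAGuestPtr *ptr = reloc->location;
 *
 *	ptr->gmrId = bo->mem.start;	// GMR id assigned at validation
 *	// ptr->offset was set by user-space relative to the buffer start
 *
 * MOB-backed resources instead patch a bare mob id through
 * reloc->mob_loc, as the VMW_PL_MOB case above shows.
 */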
2056
2057/**
2058 * vmw_resource_list_unreference - Free up a resource list and unreference
2059 * all resources referenced by it.
2060 *
2061 * @list: The resource list.
2062 */
2063static void vmw_resource_list_unreference(struct list_head *list)
2064{
2065 struct vmw_resource_val_node *val, *val_next;
2066
2067 /*
2068 * Drop references to resources held during command submission.
2069 */
2070
2071 list_for_each_entry_safe(val, val_next, list, head) {
2072 list_del_init(&val->head);
2073 vmw_resource_unreference(&val->res);
2074 if (unlikely(val->staged_bindings))
2075 kfree(val->staged_bindings);
2076 kfree(val);
2077 }
2078}
2079
2080static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2081{
2082 struct vmw_validate_buffer *entry, *next;
2083 struct vmw_resource_val_node *val;
2084
2085 /*
2086 * Drop references to DMA buffers held during command submission.
2087 */
2088 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2089 base.head) {
2090 list_del(&entry->base.head);
2091 ttm_bo_unref(&entry->base.bo);
2092 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2093 sw_context->cur_val_buf--;
2094 }
2095 BUG_ON(sw_context->cur_val_buf != 0);
2096
2097 list_for_each_entry(val, &sw_context->resource_list, head)
2098 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2099}
2100
2101static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2102 struct ttm_buffer_object *bo,
2103 bool validate_as_mob)
2104{
2105 int ret;
2106
2107
2108 /*
2109 * Don't validate pinned buffers.
2110 */
2111
2112 if (bo == dev_priv->pinned_bo ||
2113 (bo == dev_priv->dummy_query_bo &&
2114 dev_priv->dummy_query_bo_pinned))
2115 return 0;
2116
2117 if (validate_as_mob)
2118 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2119
2120 /*
2121 * Put BO in VRAM if there is space, otherwise as a GMR.
2122 * If there is no space in VRAM and GMR ids are all used up,
2123 * start evicting GMRs to make room. If the DMA buffer can't be
2124 * used as a GMR, this will return -ENOMEM.
2125 */
2126
2127 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2128 if (likely(ret == 0 || ret == -ERESTARTSYS))
2129 return ret;
2130
2131 /*
2132 * If that failed, try VRAM again, this time evicting
2133 * previous contents.
2134 */
2135
2136 DRM_INFO("Falling through to VRAM.\n");
2137 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2138 return ret;
2139}
2140
2141static int vmw_validate_buffers(struct vmw_private *dev_priv,
2142 struct vmw_sw_context *sw_context)
2143{
2144 struct vmw_validate_buffer *entry;
2145 int ret;
2146
2147 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2148 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2149 entry->validate_as_mob);
2150 if (unlikely(ret != 0))
2151 return ret;
2152 }
2153 return 0;
2154}
2155
2156static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2157 uint32_t size)
2158{
2159 if (likely(sw_context->cmd_bounce_size >= size))
2160 return 0;
2161
2162 if (sw_context->cmd_bounce_size == 0)
2163 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2164
2165 while (sw_context->cmd_bounce_size < size) {
2166 sw_context->cmd_bounce_size =
2167 PAGE_ALIGN(sw_context->cmd_bounce_size +
2168 (sw_context->cmd_bounce_size >> 1));
2169 }
2170
2171 if (sw_context->cmd_bounce != NULL)
2172 vfree(sw_context->cmd_bounce);
2173
2174 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2175
2176 if (sw_context->cmd_bounce == NULL) {
2177 DRM_ERROR("Failed to allocate command bounce buffer.\n");
2178 sw_context->cmd_bounce_size = 0;
2179 return -ENOMEM;
2180 }
2181
2182 return 0;
2183}
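/*
 * Illustrative numbers for the growth policy above: each iteration
 * grows the bounce buffer to PAGE_ALIGN(size + size / 2), i.e. roughly
 * 1.5x per step. Assuming a 4 KiB PAGE_SIZE and a 32 KiB
 * VMWGFX_CMD_BOUNCE_INIT_SIZE (the constant is defined elsewhere in
 * the driver), a request for 100000 bytes resizes as
 *
 *	32768 -> 49152 -> 73728 -> 110592   (>= 100000, done)
 *
 * and the buffer is vmalloc'ed at 110592 bytes. The old contents are
 * not preserved; the caller copies the command stream in afterwards.
 */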
2184
2185/**
2186 * vmw_execbuf_fence_commands - create and submit a command stream fence
2187 *
2188 * Creates a fence object and submits a command stream marker.
2189 * If this fails for some reason, we sync the fifo and return NULL.
2190 * It is then safe to fence buffers with a NULL pointer.
2191 *
2192 * If @p_handle is not NULL, @file_priv must also not be NULL. A
2193 * user-space handle is created only if @p_handle is non-NULL.
2194 */
2195
2196int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2197 struct vmw_private *dev_priv,
2198 struct vmw_fence_obj **p_fence,
2199 uint32_t *p_handle)
2200{
2201 uint32_t sequence;
2202 int ret;
2203 bool synced = false;
2204
2205 /* p_handle implies file_priv. */
2206 BUG_ON(p_handle != NULL && file_priv == NULL);
2207
2208 ret = vmw_fifo_send_fence(dev_priv, &sequence);
2209 if (unlikely(ret != 0)) {
2210 DRM_ERROR("Fence submission error. Syncing.\n");
2211 synced = true;
2212 }
2213
2214 if (p_handle != NULL)
2215 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2216 sequence,
2217 DRM_VMW_FENCE_FLAG_EXEC,
2218 p_fence, p_handle);
2219 else
2220 ret = vmw_fence_create(dev_priv->fman, sequence,
2221 DRM_VMW_FENCE_FLAG_EXEC,
2222 p_fence);
2223
2224 if (unlikely(ret != 0 && !synced)) {
2225 (void) vmw_fallback_wait(dev_priv, false, false,
2226 sequence, false,
2227 VMW_FENCE_WAIT_TIMEOUT);
2228 *p_fence = NULL;
2229 }
2230
2231 return 0;
2232}
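/*
 * Illustrative in-kernel usage (a sketch): when no user-space handle is
 * needed, both @file_priv and @p_handle may be NULL, which is how
 * __vmw_execbuf_release_pinned_bo() below calls this function:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	// On fence creation failure the fifo has already been synced
 *	// and fence is NULL, which is safe to pass on to
 *	// ttm_eu_fence_buffer_objects().
 */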
2233
2234/**
2235 * vmw_execbuf_copy_fence_user - copy fence object information to
2236 * user-space.
2237 *
2238 * @dev_priv: Pointer to a vmw_private struct.
2239 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2240 * @ret: Return value from fence object creation.
2241 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2242 * which the information should be copied.
2243 * @fence: Pointer to the fence object.
2244 * @fence_handle: User-space fence handle.
2245 *
2246 * This function copies fence information to user-space. If copying fails,
2247 * the user-space struct drm_vmw_fence_rep::error member is hopefully
2248 * left untouched, and if it's preloaded with -EFAULT by user-space,
2249 * the error will hopefully be detected.
2250 * Also, if copying fails, user-space will be unable to signal the fence
2251 * object, so we wait for it immediately and then unreference the
2252 * user-space reference.
2253 */
57c5ee79 2254void
8bf445ce
TH
2255vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2256 struct vmw_fpriv *vmw_fp,
2257 int ret,
2258 struct drm_vmw_fence_rep __user *user_fence_rep,
2259 struct vmw_fence_obj *fence,
2260 uint32_t fence_handle)
2261{
2262 struct drm_vmw_fence_rep fence_rep;
2263
2264 if (user_fence_rep == NULL)
2265 return;
2266
2267 memset(&fence_rep, 0, sizeof(fence_rep));
2268
2269 fence_rep.error = ret;
2270 if (ret == 0) {
2271 BUG_ON(fence == NULL);
2272
2273 fence_rep.handle = fence_handle;
2274 fence_rep.seqno = fence->seqno;
2275 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2276 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2277 }
2278
2279 /*
2280 * copy_to_user errors will be detected by user space not
2281 * seeing fence_rep::error filled in. Typically
2282 * user-space would have pre-set that member to -EFAULT.
2283 */
2284 ret = copy_to_user(user_fence_rep, &fence_rep,
2285 sizeof(fence_rep));
2286
2287 /*
2288 * User-space lost the fence object. We need to sync
2289 * and unreference the handle.
2290 */
2291 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2292 ttm_ref_object_base_unref(vmw_fp->tfile,
2293 fence_handle, TTM_REF_USAGE);
2294 DRM_ERROR("Fence copy error. Syncing.\n");
2295 (void) vmw_fence_obj_wait(fence, fence->signal_mask,
2296 false, false,
2297 VMW_FENCE_WAIT_TIMEOUT);
2298 }
2299}
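/*
 * Illustrative user-space sketch of the -EFAULT convention described
 * above (a sketch only; struct and ioctl names as declared in
 * vmwgfx_drm.h and libdrm's xf86drm.h, other setup elided):
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_size;
 *	fence_rep.error = -EFAULT;	// pre-set: survives a failed copy
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (fence_rep.error != 0) {
 *		// Either fence creation failed (error holds the code),
 *		// or the kernel's copy_to_user() failed and the -EFAULT
 *		// pre-set was never overwritten.
 *	}
 */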
2300
2301int vmw_execbuf_process(struct drm_file *file_priv,
2302 struct vmw_private *dev_priv,
2303 void __user *user_commands,
2304 void *kernel_commands,
2305 uint32_t command_size,
2306 uint64_t throttle_us,
2307 struct drm_vmw_fence_rep __user *user_fence_rep,
2308 struct vmw_fence_obj **out_fence)
fb1d9738 2309{
fb1d9738 2310 struct vmw_sw_context *sw_context = &dev_priv->ctx;
bb1bd2f4 2311 struct vmw_fence_obj *fence = NULL;
c0951b79
TH
2312 struct vmw_resource *error_resource;
2313 struct list_head resource_list;
2314 struct ww_acquire_ctx ticket;
2315 uint32_t handle;
2316 void *cmd;
2317 int ret;
2318
2319 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2320 if (unlikely(ret != 0))
2321 return -ERESTARTSYS;
2322
2323 if (kernel_commands == NULL) {
2324 sw_context->kernel = false;
2325
2326 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2327 if (unlikely(ret != 0))
2328 goto out_unlock;
2329
2330
2331 ret = copy_from_user(sw_context->cmd_bounce,
2332 user_commands, command_size);
2333
2334 if (unlikely(ret != 0)) {
2335 ret = -EFAULT;
2336 DRM_ERROR("Failed copying commands.\n");
2337 goto out_unlock;
2338 }
2339 kernel_commands = sw_context->cmd_bounce;
2340 } else
2341 sw_context->kernel = true;
2342
2343 sw_context->fp = vmw_fpriv(file_priv);
2344 sw_context->cur_reloc = 0;
2345 sw_context->cur_val_buf = 0;
2346 sw_context->fence_flags = 0;
2347 INIT_LIST_HEAD(&sw_context->resource_list);
2348 sw_context->cur_query_bo = dev_priv->pinned_bo;
2349 sw_context->last_query_ctx = NULL;
2350 sw_context->needs_post_query_barrier = false;
2351 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2352 INIT_LIST_HEAD(&sw_context->validate_nodes);
2353 INIT_LIST_HEAD(&sw_context->res_relocations);
2354 if (!sw_context->res_ht_initialized) {
2355 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2356 if (unlikely(ret != 0))
2357 goto out_unlock;
2358 sw_context->res_ht_initialized = true;
2359 }
2360 INIT_LIST_HEAD(&sw_context->staged_shaders);
2361
2362 INIT_LIST_HEAD(&resource_list);
2363 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2364 command_size);
2365 if (unlikely(ret != 0))
2366 goto out_err_nores;
2367
2368 ret = vmw_resources_reserve(sw_context);
2369 if (unlikely(ret != 0))
2370 goto out_err_nores;
2371
2372 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2373 if (unlikely(ret != 0))
2374 goto out_err;
2375
2376 ret = vmw_validate_buffers(dev_priv, sw_context);
2377 if (unlikely(ret != 0))
2378 goto out_err;
2379
2380 ret = vmw_resources_validate(sw_context);
2381 if (unlikely(ret != 0))
2382 goto out_err;
2383
2384 if (throttle_us) {
2385 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2386 throttle_us);
2387
2388 if (unlikely(ret != 0))
2389 goto out_err;
2390 }
2391
2392 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2393 if (unlikely(ret != 0)) {
2394 ret = -ERESTARTSYS;
2395 goto out_err;
2396 }
2397
2398 cmd = vmw_fifo_reserve(dev_priv, command_size);
2399 if (unlikely(cmd == NULL)) {
2400 DRM_ERROR("Failed reserving fifo space for commands.\n");
2401 ret = -ENOMEM;
2402 goto out_unlock_binding;
2403 }
2404
2405 vmw_apply_relocations(sw_context);
2406 memcpy(cmd, kernel_commands, command_size);
2407
2408 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2409 vmw_resource_relocations_free(&sw_context->res_relocations);
2410
2411 vmw_fifo_commit(dev_priv, command_size);
2412
2413 vmw_query_bo_switch_commit(dev_priv, sw_context);
2414 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2415 &fence,
2416 (user_fence_rep) ? &handle : NULL);
2417 /*
2418 * This error is harmless, because if fence submission fails,
2419 * vmw_fifo_send_fence will sync. The error will be propagated to
2420 * user-space in @fence_rep
2421 */
2422
2423 if (ret != 0)
2424 DRM_ERROR("Fence submission error. Syncing.\n");
2425
2426 vmw_resource_list_unreserve(&sw_context->resource_list, false);
2427 mutex_unlock(&dev_priv->binding_mutex);
2428
2429 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2430 (void *) fence);
2431
2432 if (unlikely(dev_priv->pinned_bo != NULL &&
2433 !dev_priv->query_cid_valid))
2434 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2435
2436 vmw_clear_validations(sw_context);
2437 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2438 user_fence_rep, fence, handle);
2439
2440 /* Don't unreference when handing fence out */
2441 if (unlikely(out_fence != NULL)) {
2442 *out_fence = fence;
2443 fence = NULL;
2444 } else if (likely(fence != NULL)) {
2445 vmw_fence_obj_unreference(&fence);
2446 }
2447
2448 list_splice_init(&sw_context->resource_list, &resource_list);
2449 vmw_compat_shaders_commit(sw_context->fp->shman,
2450 &sw_context->staged_shaders);
2451 mutex_unlock(&dev_priv->cmdbuf_mutex);
2452
2453 /*
2454 * Unreference resources outside of the cmdbuf_mutex to
2455 * avoid deadlocks in resource destruction paths.
2456 */
2457 vmw_resource_list_unreference(&resource_list);
2458
2459 return 0;
2460
2461out_unlock_binding:
2462 mutex_unlock(&dev_priv->binding_mutex);
2463out_err:
2464 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2465out_err_nores:
2466 vmw_resource_list_unreserve(&sw_context->resource_list, true);
2467 vmw_resource_relocations_free(&sw_context->res_relocations);
2468 vmw_free_relocations(sw_context);
2469 vmw_clear_validations(sw_context);
2470 if (unlikely(dev_priv->pinned_bo != NULL &&
2471 !dev_priv->query_cid_valid))
2472 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2473out_unlock:
2474 list_splice_init(&sw_context->resource_list, &resource_list);
2475 error_resource = sw_context->error_resource;
2476 sw_context->error_resource = NULL;
2477 vmw_compat_shaders_revert(sw_context->fp->shman,
2478 &sw_context->staged_shaders);
2479 mutex_unlock(&dev_priv->cmdbuf_mutex);
2480
2481 /*
2482 * Unreference resources outside of the cmdbuf_mutex to
2483 * avoid deadlocks in resource destruction paths.
2484 */
2485 vmw_resource_list_unreference(&resource_list);
2486 if (unlikely(error_resource != NULL))
2487 vmw_resource_unreference(&error_resource);
2488
2489 return ret;
2490}
2491
2492/**
2493 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2494 *
2495 * @dev_priv: The device private structure.
2496 *
2497 * This function is called to idle the fifo and unpin the query buffer
2498 * if the normal way to do this hits an error, which should typically be
2499 * extremely rare.
2500 */
2501static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2502{
2503 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2504
2505 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2506 vmw_bo_pin(dev_priv->pinned_bo, false);
2507 vmw_bo_pin(dev_priv->dummy_query_bo, false);
2508 dev_priv->dummy_query_bo_pinned = false;
2509}
2510
2511
2512/**
2513 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2514 * query bo.
2515 *
2516 * @dev_priv: The device private structure.
2517 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2518 * _after_ a query barrier that flushes all queries touching the current
2519 * buffer pointed to by @dev_priv->pinned_bo
2520 *
2521 * This function should be used to unpin the pinned query bo, or
2522 * as a query barrier when we need to make sure that all queries have
2523 * finished before the next fifo command. (For example on hardware
2524 * context destructions where the hardware may otherwise leak unfinished
2525 * queries).
2526 *
2527 * This function does not return any failure codes, but makes attempts
2528 * to do safe unpinning in case of errors.
2529 *
2530 * The function will synchronize on the previous query barrier, and will
2531 * thus not finish until that barrier has executed.
2532 *
2533 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2534 * before calling this function.
2535 */
2536void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2537 struct vmw_fence_obj *fence)
2538{
2539 int ret = 0;
2540 struct list_head validate_list;
2541 struct ttm_validate_buffer pinned_val, query_val;
2542 struct vmw_fence_obj *lfence = NULL;
2543 struct ww_acquire_ctx ticket;
2544
2545 if (dev_priv->pinned_bo == NULL)
2546 goto out_unlock;
2547
2548 INIT_LIST_HEAD(&validate_list);
2549
2550 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2551 list_add_tail(&pinned_val.head, &validate_list);
2552
2553 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2554 list_add_tail(&query_val.head, &validate_list);
2555
2556 do {
2557 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2558 } while (ret == -ERESTARTSYS);
2559
2560 if (unlikely(ret != 0)) {
2561 vmw_execbuf_unpin_panic(dev_priv);
2562 goto out_no_reserve;
2563 }
2564
2565 if (dev_priv->query_cid_valid) {
2566 BUG_ON(fence != NULL);
2567 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2568 if (unlikely(ret != 0)) {
2569 vmw_execbuf_unpin_panic(dev_priv);
2570 goto out_no_emit;
2571 }
2572 dev_priv->query_cid_valid = false;
2573 }
2574
2575 vmw_bo_pin(dev_priv->pinned_bo, false);
2576 vmw_bo_pin(dev_priv->dummy_query_bo, false);
2577 dev_priv->dummy_query_bo_pinned = false;
2578
2579 if (fence == NULL) {
2580 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2581 NULL);
2582 fence = lfence;
2583 }
2584 ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2585 if (lfence != NULL)
2586 vmw_fence_obj_unreference(&lfence);
2587
2588 ttm_bo_unref(&query_val.bo);
2589 ttm_bo_unref(&pinned_val.bo);
2590 ttm_bo_unref(&dev_priv->pinned_bo);
2591
2592out_unlock:
2593 return;
2594
2595out_no_emit:
2596 ttm_eu_backoff_reservation(&ticket, &validate_list);
2597out_no_reserve:
2598 ttm_bo_unref(&query_val.bo);
2599 ttm_bo_unref(&pinned_val.bo);
2600 ttm_bo_unref(&dev_priv->pinned_bo);
2601}
2602
2603/**
2604 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2605 * query bo.
2606 *
2607 * @dev_priv: The device private structure.
2608 *
2609 * This function should be used to unpin the pinned query bo, or
2610 * as a query barrier when we need to make sure that all queries have
2611 * finished before the next fifo command. (For example on hardware
2612 * context destructions where the hardware may otherwise leak unfinished
2613 * queries).
2614 *
2615 * This function does not return any failure codes, but makes attempts
2616 * to do safe unpinning in case of errors.
2617 *
2618 * The function will synchronize on the previous query barrier, and will
2619 * thus not finish until that barrier has executed.
2620 */
2621void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2622{
2623 mutex_lock(&dev_priv->cmdbuf_mutex);
2624 if (dev_priv->query_cid_valid)
2625 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2626 mutex_unlock(&dev_priv->cmdbuf_mutex);
2627}
2628
2629
2630int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2631 struct drm_file *file_priv)
2632{
2633 struct vmw_private *dev_priv = vmw_priv(dev);
2634 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2635 struct vmw_master *vmaster = vmw_master(file_priv->master);
2636 int ret;
2637
2638 /*
2639 * This will allow us to extend the ioctl argument while
2640 * maintaining backwards compatibility:
2641 * We take different code paths depending on the value of
2642 * arg->version.
2643 */
2644
2645 if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2646 DRM_ERROR("Incorrect execbuf version.\n");
2647 DRM_ERROR("You're running outdated experimental "
2648 "vmwgfx user-space drivers.");
2649 return -EINVAL;
2650 }
2651
2652 ret = ttm_read_lock(&vmaster->lock, true);
2653 if (unlikely(ret != 0))
2654 return ret;
2655
2656 ret = vmw_execbuf_process(file_priv, dev_priv,
2657 (void __user *)(unsigned long)arg->commands,
2658 NULL, arg->command_size, arg->throttle_us,
2659 (void __user *)(unsigned long)arg->fence_rep,
2660 NULL);
2661
2662 if (unlikely(ret != 0))
2663 goto out_unlock;
2664
2665 vmw_kms_cursor_post_execbuf(dev_priv);
2666
2667out_unlock:
2668 ttm_read_unlock(&vmaster->lock);
2669 return ret;
2670}