drm/vmwgfx: Introduce a pin count to allow for recursive pinning v2
[linux-2.6-block.git] / drivers / gpu / drm / vmwgfx / vmwgfx_execbuf.c
1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32
33 #define VMW_RES_HT_ORDER 12
34
35 /**
36  * struct vmw_resource_relocation - Relocation info for resources
37  *
38  * @head: List head for the software context's relocation list.
39  * @res: Non-ref-counted pointer to the resource.
40  * @offset: Offset, in units of 4-byte entries, into the command buffer
41  * where the id that needs fixup is located.
42  */
43 struct vmw_resource_relocation {
44         struct list_head head;
45         const struct vmw_resource *res;
46         unsigned long offset;
47 };
48
49 /**
50  * struct vmw_resource_val_node - Validation info for resources
51  *
52  * @head: List head for the software context's resource list.
53  * @hash: Hash entry for quick resource to val_node lookup.
54  * @res: Ref-counted pointer to the resource.
55  * @new_backup: Refcounted pointer to the new backup buffer. If non-NULL,
56  * the backup buffer is switched to this one on unreserve.
57  * @staged_bindings: If @res is a context, tracks bindings set up during
58  * the command batch. Otherwise NULL.
59  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
60  * @first_usage: Set to true the first time the resource is referenced in
61  * the command stream.
62  * @no_buffer_needed: The resource does not need a backup buffer allocated on
63  * reservation. The command stream will provide one.
64  */
65 struct vmw_resource_val_node {
66         struct list_head head;
67         struct drm_hash_item hash;
68         struct vmw_resource *res;
69         struct vmw_dma_buffer *new_backup;
70         struct vmw_ctx_binding_state *staged_bindings;
71         unsigned long new_backup_offset;
72         bool first_usage;
73         bool no_buffer_needed;
74 };
75
76 /**
77  * struct vmw_cmd_entry - Describe a command for the verifier
78  * @func: Call-back to the verifier function for this command.
79  * @user_allow: Whether allowed from the execbuf ioctl.
80  * @gb_disable: Whether disabled if guest-backed objects are available.
81  * @gb_enable: Whether enabled iff guest-backed objects are available.
82  */
83 struct vmw_cmd_entry {
84         int (*func) (struct vmw_private *, struct vmw_sw_context *,
85                      SVGA3dCmdHeader *);
86         bool user_allow;
87         bool gb_disable;
88         bool gb_enable;
89 };
90
91 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
92         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93                                        (_gb_disable), (_gb_enable)}
94
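/*
 * Usage sketch (illustrative only, based on how the macro is defined above):
 * VMW_CMD_DEF() populates a per-command verifier table indexed by the
 * command id relative to SVGA_3D_CMD_BASE, roughly:
 *
 *   static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *           VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *                       true, false, false),
 *           ...
 *   };
 *
 * The specific entry shown here is an example, not a copy of the actual table.
 */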
95 /**
96  * vmw_resource_list_unreserve - Unreserve resources previously reserved
97  * for command submission.
98  *
99  * @list: List of resources to unreserve.
100  * @backoff: Whether command submission failed.
101  */
102 static void vmw_resource_list_unreserve(struct list_head *list,
103                                         bool backoff)
104 {
105         struct vmw_resource_val_node *val;
106
107         list_for_each_entry(val, list, head) {
108                 struct vmw_resource *res = val->res;
109                 struct vmw_dma_buffer *new_backup =
110                         backoff ? NULL : val->new_backup;
111
112                 /*
113                  * Transfer staged context bindings to the
114                  * persistent context binding tracker.
115                  */
116                 if (unlikely(val->staged_bindings)) {
117                         if (!backoff) {
118                                 vmw_context_binding_state_transfer
119                                         (val->res, val->staged_bindings);
120                         }
121                         kfree(val->staged_bindings);
122                         val->staged_bindings = NULL;
123                 }
124                 vmw_resource_unreserve(res, new_backup,
125                         val->new_backup_offset);
126                 vmw_dmabuf_unreference(&val->new_backup);
127         }
128 }
129
130
131 /**
132  * vmw_resource_val_add - Add a resource to the software context's
133  * resource list if it's not already on it.
134  *
135  * @sw_context: Pointer to the software context.
136  * @res: Pointer to the resource.
137  * @p_node: On successful return points to a valid pointer to a
138  * struct vmw_resource_val_node, if non-NULL on entry.
139  */
140 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141                                 struct vmw_resource *res,
142                                 struct vmw_resource_val_node **p_node)
143 {
144         struct vmw_resource_val_node *node;
145         struct drm_hash_item *hash;
146         int ret;
147
148         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
149                                     &hash) == 0)) {
150                 node = container_of(hash, struct vmw_resource_val_node, hash);
151                 node->first_usage = false;
152                 if (unlikely(p_node != NULL))
153                         *p_node = node;
154                 return 0;
155         }
156
157         node = kzalloc(sizeof(*node), GFP_KERNEL);
158         if (unlikely(node == NULL)) {
159                 DRM_ERROR("Failed to allocate a resource validation "
160                           "entry.\n");
161                 return -ENOMEM;
162         }
163
164         node->hash.key = (unsigned long) res;
165         ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
166         if (unlikely(ret != 0)) {
167                 DRM_ERROR("Failed to initialize a resource validation "
168                           "entry.\n");
169                 kfree(node);
170                 return ret;
171         }
172         list_add_tail(&node->head, &sw_context->resource_list);
173         node->res = vmw_resource_reference(res);
174         node->first_usage = true;
175
176         if (unlikely(p_node != NULL))
177                 *p_node = node;
178
179         return 0;
180 }
181
182 /**
183  * vmw_resource_context_res_add - Put resources previously bound to a context on
184  * the validation list
185  *
186  * @dev_priv: Pointer to a device private structure
187  * @sw_context: Pointer to a software context used for this command submission
188  * @ctx: Pointer to the context resource
189  *
190  * This function puts all resources that were previously bound to @ctx on
191  * the resource validation list. This is part of the context state re-emission.
192  */
193 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
194                                         struct vmw_sw_context *sw_context,
195                                         struct vmw_resource *ctx)
196 {
197         struct list_head *binding_list;
198         struct vmw_ctx_binding *entry;
199         int ret = 0;
200         struct vmw_resource *res;
201
202         mutex_lock(&dev_priv->binding_mutex);
203         binding_list = vmw_context_binding_list(ctx);
204
205         list_for_each_entry(entry, binding_list, ctx_list) {
206                 res = vmw_resource_reference_unless_doomed(entry->bi.res);
207                 if (unlikely(res == NULL))
208                         continue;
209
210                 ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
211                 vmw_resource_unreference(&res);
212                 if (unlikely(ret != 0))
213                         break;
214         }
215
216         mutex_unlock(&dev_priv->binding_mutex);
217         return ret;
218 }
219
220 /**
221  * vmw_resource_relocation_add - Add a relocation to the relocation list
222  *
223  * @list: Pointer to head of relocation list.
224  * @res: The resource.
225  * @offset: Offset into the command buffer currently being parsed where the
226  * id that needs fixup is located. Granularity is 4 bytes.
227  */
228 static int vmw_resource_relocation_add(struct list_head *list,
229                                        const struct vmw_resource *res,
230                                        unsigned long offset)
231 {
232         struct vmw_resource_relocation *rel;
233
234         rel = kmalloc(sizeof(*rel), GFP_KERNEL);
235         if (unlikely(rel == NULL)) {
236                 DRM_ERROR("Failed to allocate a resource relocation.\n");
237                 return -ENOMEM;
238         }
239
240         rel->res = res;
241         rel->offset = offset;
242         list_add_tail(&rel->head, list);
243
244         return 0;
245 }
246
247 /**
248  * vmw_resource_relocations_free - Free all relocations on a list
249  *
250  * @list: Pointer to the head of the relocation list.
251  */
252 static void vmw_resource_relocations_free(struct list_head *list)
253 {
254         struct vmw_resource_relocation *rel, *n;
255
256         list_for_each_entry_safe(rel, n, list, head) {
257                 list_del(&rel->head);
258                 kfree(rel);
259         }
260 }
261
262 /**
263  * vmw_resource_relocations_apply - Apply all relocations on a list
264  *
265  * @cb: Pointer to the start of the command buffer being patched. This need
266  * not be the same buffer as the one being parsed when the relocation
267  * list was built, but the contents must be the same modulo the
268  * resource ids.
269  * @list: Pointer to the head of the relocation list.
270  */
271 static void vmw_resource_relocations_apply(uint32_t *cb,
272                                            struct list_head *list)
273 {
274         struct vmw_resource_relocation *rel;
275
276         list_for_each_entry(rel, list, head) {
277                 if (likely(rel->res != NULL))
278                         cb[rel->offset] = rel->res->id;
279                 else
280                         cb[rel->offset] = SVGA_3D_CMD_NOP;
281         }
282 }
283
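/*
 * Worked example (illustrative): the relocation offset is recorded in
 * 32-bit words. A command verifier typically calls
 *
 *   vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *                               id_loc - sw_context->buf_start);
 *
 * where both id_loc and buf_start are uint32_t pointers, so the pointer
 * difference is already expressed in 4-byte units. When the relocations are
 * applied above, cb[rel->offset] is patched with the resource's device id.
 */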
284 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
285                            struct vmw_sw_context *sw_context,
286                            SVGA3dCmdHeader *header)
287 {
288         /* Without the explicit 0, the GNU "?:" extension would return 1. */
288         return capable(CAP_SYS_ADMIN) ? 0 : -EINVAL;
289 }
290
291 static int vmw_cmd_ok(struct vmw_private *dev_priv,
292                       struct vmw_sw_context *sw_context,
293                       SVGA3dCmdHeader *header)
294 {
295         return 0;
296 }
297
298 /**
299  * vmw_bo_to_validate_list - add a bo to a validate list
300  *
301  * @sw_context: The software context used for this command submission batch.
302  * @bo: The buffer object to add.
303  * @validate_as_mob: Validate this buffer as a MOB.
304  * @p_val_node: If non-NULL, will be updated with the validate node number
305  * on return.
306  *
307  * Returns -EINVAL if the limit of number of buffer objects per command
308  * submission is reached.
309  */
310 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
311                                    struct vmw_dma_buffer *vbo,
312                                    bool validate_as_mob,
313                                    uint32_t *p_val_node)
314 {
315         uint32_t val_node;
316         struct vmw_validate_buffer *vval_buf;
317         struct ttm_validate_buffer *val_buf;
318         struct drm_hash_item *hash;
319         int ret;
320
321         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
322                                     &hash) == 0)) {
323                 vval_buf = container_of(hash, struct vmw_validate_buffer,
324                                         hash);
325                 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
326                         DRM_ERROR("Inconsistent buffer usage.\n");
327                         return -EINVAL;
328                 }
329                 val_buf = &vval_buf->base;
330                 val_node = vval_buf - sw_context->val_bufs;
331         } else {
332                 val_node = sw_context->cur_val_buf;
333                 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
334                         DRM_ERROR("Max number of DMA buffers per submission "
335                                   "exceeded.\n");
336                         return -EINVAL;
337                 }
338                 vval_buf = &sw_context->val_bufs[val_node];
339                 vval_buf->hash.key = (unsigned long) vbo;
340                 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
341                 if (unlikely(ret != 0)) {
342                         DRM_ERROR("Failed to initialize a buffer validation "
343                                   "entry.\n");
344                         return ret;
345                 }
346                 ++sw_context->cur_val_buf;
347                 val_buf = &vval_buf->base;
348                 val_buf->bo = ttm_bo_reference(&vbo->base);
349                 val_buf->shared = false;
350                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
351                 vval_buf->validate_as_mob = validate_as_mob;
352         }
353
354         if (p_val_node)
355                 *p_val_node = val_node;
356
357         return 0;
358 }
359
360 /**
361  * vmw_resources_reserve - Reserve all resources on the sw_context's
362  * resource list.
363  *
364  * @sw_context: Pointer to the software context.
365  *
366  * Note that since VMware's command submission is currently protected by
367  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources;
368  * only a single thread at a time will attempt this.
369  */
370 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
371 {
372         struct vmw_resource_val_node *val;
373         int ret;
374
375         list_for_each_entry(val, &sw_context->resource_list, head) {
376                 struct vmw_resource *res = val->res;
377
378                 ret = vmw_resource_reserve(res, val->no_buffer_needed);
379                 if (unlikely(ret != 0))
380                         return ret;
381
382                 if (res->backup) {
383                         struct vmw_dma_buffer *vbo = res->backup;
384
385                         ret = vmw_bo_to_validate_list
386                                 (sw_context, vbo,
387                                  vmw_resource_needs_backup(res), NULL);
388
389                         if (unlikely(ret != 0))
390                                 return ret;
391                 }
392         }
393         return 0;
394 }
395
396 /**
397  * vmw_resources_validate - Validate all resources on the sw_context's
398  * resource list.
399  *
400  * @sw_context: Pointer to the software context.
401  *
402  * Before this function is called, all resource backup buffers must have
403  * been validated.
404  */
405 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
406 {
407         struct vmw_resource_val_node *val;
408         int ret;
409
410         list_for_each_entry(val, &sw_context->resource_list, head) {
411                 struct vmw_resource *res = val->res;
412
413                 ret = vmw_resource_validate(res);
414                 if (unlikely(ret != 0)) {
415                         if (ret != -ERESTARTSYS)
416                                 DRM_ERROR("Failed to validate resource.\n");
417                         return ret;
418                 }
419         }
420         return 0;
421 }
422
423
424 /**
425  * vmw_cmd_res_reloc_add - Add a resource to a software context's
426  * relocation and validation lists.
427  *
428  * @dev_priv: Pointer to a struct vmw_private identifying the device.
429  * @sw_context: Pointer to the software context.
430  * @res_type: Resource type.
431  * @id_loc: Pointer to where the id that needs translation is located.
432  * @res: Valid pointer to a struct vmw_resource.
433  * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
434  * used for this resource is returned here.
435  */
436 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
437                                  struct vmw_sw_context *sw_context,
438                                  enum vmw_res_type res_type,
439                                  uint32_t *id_loc,
440                                  struct vmw_resource *res,
441                                  struct vmw_resource_val_node **p_val)
442 {
443         int ret;
444         struct vmw_resource_val_node *node;
445
446         *p_val = NULL;
447         ret = vmw_resource_relocation_add(&sw_context->res_relocations,
448                                           res,
449                                           id_loc - sw_context->buf_start);
450         if (unlikely(ret != 0))
451                 return ret;
452
453         ret = vmw_resource_val_add(sw_context, res, &node);
454         if (unlikely(ret != 0))
455                 return ret;
456
457         if (res_type == vmw_res_context && dev_priv->has_mob &&
458             node->first_usage) {
459
460                 /*
461                  * Put contexts first on the list to be able to exit
462                  * list traversal for contexts early.
463                  */
464                 list_del(&node->head);
465                 list_add(&node->head, &sw_context->resource_list);
466
467                 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
468                 if (unlikely(ret != 0))
469                         return ret;
470                 node->staged_bindings =
471                         kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
472                 if (node->staged_bindings == NULL) {
473                         DRM_ERROR("Failed to allocate context binding "
474                                   "information.\n");
475                         return -ENOMEM;
476                 }
477                 INIT_LIST_HEAD(&node->staged_bindings->list);
478         }
479
480         if (p_val)
481                 *p_val = node;
482
483         return 0;
484 }
485
486
487 /**
488  * vmw_cmd_res_check - Check that a resource is present and if so, put it
489  * on the resource validate list unless it's already there.
490  *
491  * @dev_priv: Pointer to a device private structure.
492  * @sw_context: Pointer to the software context.
493  * @res_type: Resource type.
494  * @converter: User-space visible type-specific information.
495  * @id_loc: Pointer to the location in the command buffer currently being
496  * parsed from where the user-space resource id handle is located.
497  * @p_val: Pointer to a pointer to a resource validation node. Populated
498  * on exit.
499  */
500 static int
501 vmw_cmd_res_check(struct vmw_private *dev_priv,
502                   struct vmw_sw_context *sw_context,
503                   enum vmw_res_type res_type,
504                   const struct vmw_user_resource_conv *converter,
505                   uint32_t *id_loc,
506                   struct vmw_resource_val_node **p_val)
507 {
508         struct vmw_res_cache_entry *rcache =
509                 &sw_context->res_cache[res_type];
510         struct vmw_resource *res;
511         struct vmw_resource_val_node *node;
512         int ret;
513
514         if (*id_loc == SVGA3D_INVALID_ID) {
515                 if (p_val)
516                         *p_val = NULL;
517                 if (res_type == vmw_res_context) {
518                         DRM_ERROR("Illegal context invalid id.\n");
519                         return -EINVAL;
520                 }
521                 return 0;
522         }
523
524         /*
525          * Fastpath in case of repeated commands referencing the same
526          * resource
527          */
528
529         if (likely(rcache->valid && *id_loc == rcache->handle)) {
530                 const struct vmw_resource *res = rcache->res;
531
532                 rcache->node->first_usage = false;
533                 if (p_val)
534                         *p_val = rcache->node;
535
536                 return vmw_resource_relocation_add
537                         (&sw_context->res_relocations, res,
538                          id_loc - sw_context->buf_start);
539         }
540
541         ret = vmw_user_resource_lookup_handle(dev_priv,
542                                               sw_context->fp->tfile,
543                                               *id_loc,
544                                               converter,
545                                               &res);
546         if (unlikely(ret != 0)) {
547                 DRM_ERROR("Could not find or use resource 0x%08x.\n",
548                           (unsigned) *id_loc);
549                 dump_stack();
550                 return ret;
551         }
552
553         rcache->valid = true;
554         rcache->res = res;
555         rcache->handle = *id_loc;
556
557         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
558                                     res, &node);
559         if (unlikely(ret != 0))
560                 goto out_no_reloc;
561
562         rcache->node = node;
563         if (p_val)
564                 *p_val = node;
565         vmw_resource_unreference(&res);
566         return 0;
567
568 out_no_reloc:
569         BUG_ON(sw_context->error_resource != NULL);
570         sw_context->error_resource = res;
571
572         return ret;
573 }
574
575 /**
576  * vmw_rebind_contexts - Rebind all resources previously bound to
577  * referenced contexts.
578  *
579  * @sw_context: Pointer to the software context.
580  *
581  * Rebind context binding points that have been scrubbed because of eviction.
582  */
583 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
584 {
585         struct vmw_resource_val_node *val;
586         int ret;
587
588         list_for_each_entry(val, &sw_context->resource_list, head) {
589                 if (unlikely(!val->staged_bindings))
590                         break;
591
592                 ret = vmw_context_rebind_all(val->res);
593                 if (unlikely(ret != 0)) {
594                         if (ret != -ERESTARTSYS)
595                                 DRM_ERROR("Failed to rebind context.\n");
596                         return ret;
597                 }
598         }
599
600         return 0;
601 }
602
603 /**
604  * vmw_cmd_cid_check - Check a command header for valid context information.
605  *
606  * @dev_priv: Pointer to a device private structure.
607  * @sw_context: Pointer to the software context.
608  * @header: A command header with an embedded user-space context handle.
609  *
610  * Convenience function: Call vmw_cmd_res_check with the user-space context
611  * handle embedded in @header.
612  */
613 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
614                              struct vmw_sw_context *sw_context,
615                              SVGA3dCmdHeader *header)
616 {
617         struct vmw_cid_cmd {
618                 SVGA3dCmdHeader header;
619                 uint32_t cid;
620         } *cmd;
621
622         cmd = container_of(header, struct vmw_cid_cmd, header);
623         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
624                                  user_context_converter, &cmd->cid, NULL);
625 }
626
627 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
628                                            struct vmw_sw_context *sw_context,
629                                            SVGA3dCmdHeader *header)
630 {
631         struct vmw_sid_cmd {
632                 SVGA3dCmdHeader header;
633                 SVGA3dCmdSetRenderTarget body;
634         } *cmd;
635         struct vmw_resource_val_node *ctx_node;
636         struct vmw_resource_val_node *res_node;
637         int ret;
638
639         cmd = container_of(header, struct vmw_sid_cmd, header);
640
641         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
642                                 user_context_converter, &cmd->body.cid,
643                                 &ctx_node);
644         if (unlikely(ret != 0))
645                 return ret;
646
647         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
648                                 user_surface_converter,
649                                 &cmd->body.target.sid, &res_node);
650         if (unlikely(ret != 0))
651                 return ret;
652
653         if (dev_priv->has_mob) {
654                 struct vmw_ctx_bindinfo bi;
655
656                 bi.ctx = ctx_node->res;
657                 bi.res = res_node ? res_node->res : NULL;
658                 bi.bt = vmw_ctx_binding_rt;
659                 bi.i1.rt_type = cmd->body.type;
660                 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
661         }
662
663         return 0;
664 }
665
666 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
667                                       struct vmw_sw_context *sw_context,
668                                       SVGA3dCmdHeader *header)
669 {
670         struct vmw_sid_cmd {
671                 SVGA3dCmdHeader header;
672                 SVGA3dCmdSurfaceCopy body;
673         } *cmd;
674         int ret;
675
676         cmd = container_of(header, struct vmw_sid_cmd, header);
677
678         if (!(sw_context->quirks & VMW_QUIRK_SRC_SID_OK)) {
679                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
680                                         user_surface_converter,
681                                         &cmd->body.src.sid, NULL);
682                 if (ret != 0)
683                         return ret;
684         }
685
686         if (sw_context->quirks & VMW_QUIRK_DST_SID_OK)
687                 return 0;
688
689         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
690                                  user_surface_converter,
691                                  &cmd->body.dest.sid, NULL);
692 }
693
694 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
695                                      struct vmw_sw_context *sw_context,
696                                      SVGA3dCmdHeader *header)
697 {
698         struct vmw_sid_cmd {
699                 SVGA3dCmdHeader header;
700                 SVGA3dCmdSurfaceStretchBlt body;
701         } *cmd;
702         int ret;
703
704         cmd = container_of(header, struct vmw_sid_cmd, header);
705         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
706                                 user_surface_converter,
707                                 &cmd->body.src.sid, NULL);
708         if (unlikely(ret != 0))
709                 return ret;
710         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
711                                  user_surface_converter,
712                                  &cmd->body.dest.sid, NULL);
713 }
714
715 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
716                                          struct vmw_sw_context *sw_context,
717                                          SVGA3dCmdHeader *header)
718 {
719         struct vmw_sid_cmd {
720                 SVGA3dCmdHeader header;
721                 SVGA3dCmdBlitSurfaceToScreen body;
722         } *cmd;
723
724         cmd = container_of(header, struct vmw_sid_cmd, header);
725
726         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
727                                  user_surface_converter,
728                                  &cmd->body.srcImage.sid, NULL);
729 }
730
731 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
732                                  struct vmw_sw_context *sw_context,
733                                  SVGA3dCmdHeader *header)
734 {
735         struct vmw_sid_cmd {
736                 SVGA3dCmdHeader header;
737                 SVGA3dCmdPresent body;
738         } *cmd;
739
740
741         cmd = container_of(header, struct vmw_sid_cmd, header);
742
743         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
744                                  user_surface_converter, &cmd->body.sid,
745                                  NULL);
746 }
747
748 /**
749  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
750  *
751  * @dev_priv: The device private structure.
752  * @new_query_bo: The new buffer holding query results.
753  * @sw_context: The software context used for this command submission.
754  *
755  * This function checks whether @new_query_bo is suitable for holding
756  * query results, and if another buffer currently is pinned for query
757  * results. If so, the function prepares the state of @sw_context for
758  * switching pinned buffers after successful submission of the current
759  * command batch.
760  */
761 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
762                                        struct vmw_dma_buffer *new_query_bo,
763                                        struct vmw_sw_context *sw_context)
764 {
765         struct vmw_res_cache_entry *ctx_entry =
766                 &sw_context->res_cache[vmw_res_context];
767         int ret;
768
769         BUG_ON(!ctx_entry->valid);
770         sw_context->last_query_ctx = ctx_entry->res;
771
772         if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
773
774                 if (unlikely(new_query_bo->base.num_pages > 4)) {
775                         DRM_ERROR("Query buffer too large.\n");
776                         return -EINVAL;
777                 }
778
779                 if (unlikely(sw_context->cur_query_bo != NULL)) {
780                         sw_context->needs_post_query_barrier = true;
781                         ret = vmw_bo_to_validate_list(sw_context,
782                                                       sw_context->cur_query_bo,
783                                                       dev_priv->has_mob, NULL);
784                         if (unlikely(ret != 0))
785                                 return ret;
786                 }
787                 sw_context->cur_query_bo = new_query_bo;
788
789                 ret = vmw_bo_to_validate_list(sw_context,
790                                               dev_priv->dummy_query_bo,
791                                               dev_priv->has_mob, NULL);
792                 if (unlikely(ret != 0))
793                         return ret;
794
795         }
796
797         return 0;
798 }
799
800
801 /**
802  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
803  *
804  * @dev_priv: The device private structure.
805  * @sw_context: The software context used for this command submission batch.
806  *
807  * This function will check if we're switching query buffers, and will then
808  * issue a dummy occlusion query wait used as a query barrier. When the fence
809  * object following that query wait has signaled, we are sure that all
810  * preceding queries have finished, and the old query buffer can be unpinned.
811  * However, since both the new query buffer and the old one are fenced with
812  * that fence, we can do an asynchronous unpin now, and be sure that the
813  * old query buffer won't be moved until the fence has signaled.
814  *
815  * As mentioned above, both the new and old query buffers need to be fenced
816  * using a sequence emitted *after* calling this function.
817  */
818 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
819                                      struct vmw_sw_context *sw_context)
820 {
821         /*
822          * The validate list should still hold references to all
823          * contexts here.
824          */
825
826         if (sw_context->needs_post_query_barrier) {
827                 struct vmw_res_cache_entry *ctx_entry =
828                         &sw_context->res_cache[vmw_res_context];
829                 struct vmw_resource *ctx;
830                 int ret;
831
832                 BUG_ON(!ctx_entry->valid);
833                 ctx = ctx_entry->res;
834
835                 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
836
837                 if (unlikely(ret != 0))
838                         DRM_ERROR("Out of fifo space for dummy query.\n");
839         }
840
841         if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
842                 if (dev_priv->pinned_bo) {
843                         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
844                         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
845                 }
846
847                 if (!sw_context->needs_post_query_barrier) {
848                         vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
849
850                         /*
851                          * We also pin the dummy_query_bo buffer so that we
852                          * don't need to validate it when emitting
853                          * dummy queries in context destroy paths.
854                          */
855
856                         if (!dev_priv->dummy_query_bo_pinned) {
857                                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
858                                                     true);
859                                 dev_priv->dummy_query_bo_pinned = true;
860                         }
861
862                         BUG_ON(sw_context->last_query_ctx == NULL);
863                         dev_priv->query_cid = sw_context->last_query_ctx->id;
864                         dev_priv->query_cid_valid = true;
865                         dev_priv->pinned_bo =
866                                 vmw_dmabuf_reference(sw_context->cur_query_bo);
867                 }
868         }
869 }
870
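/*
 * Calling-order sketch (inferred from the comments above, illustrative only):
 *
 *   vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *   ...validate buffers and submit the command batch...
 *   vmw_query_bo_switch_commit(dev_priv, sw_context);
 *   ...emit a fence *after* the commit and use it to fence both the old
 *      and the new query buffer...
 */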
871 /**
872  * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
873  * handle to a MOB id.
874  *
875  * @dev_priv: Pointer to a device private structure.
876  * @sw_context: The software context used for this command batch validation.
877  * @id: Pointer to the user-space handle to be translated.
878  * @vmw_bo_p: Points to a location that, on successful return, will carry
879  * a reference-counted pointer to the DMA buffer identified by the
880  * user-space handle in @id.
881  *
882  * This function saves information needed to translate a user-space buffer
883  * handle to a MOB id. The translation does not take place immediately, but
884  * during a call to vmw_apply_relocations(). This function builds a relocation
885  * list and a list of buffers to validate. The former needs to be freed using
886  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
887  * needs to be freed using vmw_clear_validations.
888  */
889 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
890                                  struct vmw_sw_context *sw_context,
891                                  SVGAMobId *id,
892                                  struct vmw_dma_buffer **vmw_bo_p)
893 {
894         struct vmw_dma_buffer *vmw_bo = NULL;
895         uint32_t handle = *id;
896         struct vmw_relocation *reloc;
897         int ret;
898
899         ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
900         if (unlikely(ret != 0)) {
901                 DRM_ERROR("Could not find or use MOB buffer.\n");
902                 ret = -EINVAL;
903                 goto out_no_reloc;
904         }
905
906         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
907                 DRM_ERROR("Max number of relocations per submission"
908                           " exceeded.\n");
909                 ret = -EINVAL;
910                 goto out_no_reloc;
911         }
912
913         reloc = &sw_context->relocs[sw_context->cur_reloc++];
914         reloc->mob_loc = id;
915         reloc->location = NULL;
916
917         ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
918         if (unlikely(ret != 0))
919                 goto out_no_reloc;
920
921         *vmw_bo_p = vmw_bo;
922         return 0;
923
924 out_no_reloc:
925         vmw_dmabuf_unreference(&vmw_bo);
926         *vmw_bo_p = NULL;
927         return ret;
928 }
929
930 /**
931  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
932  * handle to a valid SVGAGuestPtr
933  *
934  * @dev_priv: Pointer to a device private structure.
935  * @sw_context: The software context used for this command batch validation.
936  * @ptr: Pointer to the user-space handle to be translated.
937  * @vmw_bo_p: Points to a location that, on successful return, will carry
938  * a reference-counted pointer to the DMA buffer identified by the
939  * user-space handle in @ptr.
940  *
941  * This function saves information needed to translate a user-space buffer
942  * handle to a valid SVGAGuestPtr. The translation does not take place
943  * immediately, but during a call to vmw_apply_relocations().
944  * This function builds a relocation list and a list of buffers to validate.
945  * The former needs to be freed using either vmw_apply_relocations() or
946  * vmw_free_relocations(). The latter needs to be freed using
947  * vmw_clear_validations.
948  */
949 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
950                                    struct vmw_sw_context *sw_context,
951                                    SVGAGuestPtr *ptr,
952                                    struct vmw_dma_buffer **vmw_bo_p)
953 {
954         struct vmw_dma_buffer *vmw_bo = NULL;
955         uint32_t handle = ptr->gmrId;
956         struct vmw_relocation *reloc;
957         int ret;
958
959         ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
960         if (unlikely(ret != 0)) {
961                 DRM_ERROR("Could not find or use GMR region.\n");
962                 ret = -EINVAL;
963                 goto out_no_reloc;
964         }
965
966         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
967                 DRM_ERROR("Max number of relocations per submission"
968                           " exceeded.\n");
969                 ret = -EINVAL;
970                 goto out_no_reloc;
971         }
972
973         reloc = &sw_context->relocs[sw_context->cur_reloc++];
974         reloc->location = ptr;
975
976         ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
977         if (unlikely(ret != 0))
978                 goto out_no_reloc;
979
980         *vmw_bo_p = vmw_bo;
981         return 0;
982
983 out_no_reloc:
984         vmw_dmabuf_unreference(&vmw_bo);
985         *vmw_bo_p = NULL;
986         return ret;
987 }
988
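/*
 * Deferred-translation note (illustrative, based on the comments above):
 * the two helpers above only record reloc->mob_loc or reloc->location and
 * add the buffer to the validation list. The actual MOB id or SVGAGuestPtr
 * value is filled in later, once the buffer has been validated and its
 * final placement is known, when vmw_apply_relocations() walks
 * sw_context->relocs[0..cur_reloc).
 */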
989 /**
990  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
991  *
992  * @dev_priv: Pointer to a device private struct.
993  * @sw_context: The software context used for this command submission.
994  * @header: Pointer to the command header in the command stream.
995  */
996 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
997                                   struct vmw_sw_context *sw_context,
998                                   SVGA3dCmdHeader *header)
999 {
1000         struct vmw_begin_gb_query_cmd {
1001                 SVGA3dCmdHeader header;
1002                 SVGA3dCmdBeginGBQuery q;
1003         } *cmd;
1004
1005         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1006                            header);
1007
1008         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1009                                  user_context_converter, &cmd->q.cid,
1010                                  NULL);
1011 }
1012
1013 /**
1014  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
1015  *
1016  * @dev_priv: Pointer to a device private struct.
1017  * @sw_context: The software context used for this command submission.
1018  * @header: Pointer to the command header in the command stream.
1019  */
1020 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1021                                struct vmw_sw_context *sw_context,
1022                                SVGA3dCmdHeader *header)
1023 {
1024         struct vmw_begin_query_cmd {
1025                 SVGA3dCmdHeader header;
1026                 SVGA3dCmdBeginQuery q;
1027         } *cmd;
1028
1029         cmd = container_of(header, struct vmw_begin_query_cmd,
1030                            header);
1031
1032         if (unlikely(dev_priv->has_mob)) {
1033                 struct {
1034                         SVGA3dCmdHeader header;
1035                         SVGA3dCmdBeginGBQuery q;
1036                 } gb_cmd;
1037
1038                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1039
1040                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1041                 gb_cmd.header.size = cmd->header.size;
1042                 gb_cmd.q.cid = cmd->q.cid;
1043                 gb_cmd.q.type = cmd->q.type;
1044
1045                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1046                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1047         }
1048
1049         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1050                                  user_context_converter, &cmd->q.cid,
1051                                  NULL);
1052 }
1053
1054 /**
1055  * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
1056  *
1057  * @dev_priv: Pointer to a device private struct.
1058  * @sw_context: The software context used for this command submission.
1059  * @header: Pointer to the command header in the command stream.
1060  */
1061 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1062                                 struct vmw_sw_context *sw_context,
1063                                 SVGA3dCmdHeader *header)
1064 {
1065         struct vmw_dma_buffer *vmw_bo;
1066         struct vmw_query_cmd {
1067                 SVGA3dCmdHeader header;
1068                 SVGA3dCmdEndGBQuery q;
1069         } *cmd;
1070         int ret;
1071
1072         cmd = container_of(header, struct vmw_query_cmd, header);
1073         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1074         if (unlikely(ret != 0))
1075                 return ret;
1076
1077         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1078                                     &cmd->q.mobid,
1079                                     &vmw_bo);
1080         if (unlikely(ret != 0))
1081                 return ret;
1082
1083         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1084
1085         vmw_dmabuf_unreference(&vmw_bo);
1086         return ret;
1087 }
1088
1089 /**
1090  * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
1091  *
1092  * @dev_priv: Pointer to a device private struct.
1093  * @sw_context: The software context used for this command submission.
1094  * @header: Pointer to the command header in the command stream.
1095  */
1096 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1097                              struct vmw_sw_context *sw_context,
1098                              SVGA3dCmdHeader *header)
1099 {
1100         struct vmw_dma_buffer *vmw_bo;
1101         struct vmw_query_cmd {
1102                 SVGA3dCmdHeader header;
1103                 SVGA3dCmdEndQuery q;
1104         } *cmd;
1105         int ret;
1106
1107         cmd = container_of(header, struct vmw_query_cmd, header);
1108         if (dev_priv->has_mob) {
1109                 struct {
1110                         SVGA3dCmdHeader header;
1111                         SVGA3dCmdEndGBQuery q;
1112                 } gb_cmd;
1113
1114                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1115
1116                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1117                 gb_cmd.header.size = cmd->header.size;
1118                 gb_cmd.q.cid = cmd->q.cid;
1119                 gb_cmd.q.type = cmd->q.type;
1120                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1121                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1122
1123                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1124                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1125         }
1126
1127         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1128         if (unlikely(ret != 0))
1129                 return ret;
1130
1131         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1132                                       &cmd->q.guestResult,
1133                                       &vmw_bo);
1134         if (unlikely(ret != 0))
1135                 return ret;
1136
1137         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1138
1139         vmw_dmabuf_unreference(&vmw_bo);
1140         return ret;
1141 }
1142
1143 /**
1144  * vmw_cmd_wait_gb_query - validate a  SVGA_3D_CMD_WAIT_GB_QUERY command.
1145  *
1146  * @dev_priv: Pointer to a device private struct.
1147  * @sw_context: The software context used for this command submission.
1148  * @header: Pointer to the command header in the command stream.
1149  */
1150 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1151                                  struct vmw_sw_context *sw_context,
1152                                  SVGA3dCmdHeader *header)
1153 {
1154         struct vmw_dma_buffer *vmw_bo;
1155         struct vmw_query_cmd {
1156                 SVGA3dCmdHeader header;
1157                 SVGA3dCmdWaitForGBQuery q;
1158         } *cmd;
1159         int ret;
1160
1161         cmd = container_of(header, struct vmw_query_cmd, header);
1162         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1163         if (unlikely(ret != 0))
1164                 return ret;
1165
1166         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1167                                     &cmd->q.mobid,
1168                                     &vmw_bo);
1169         if (unlikely(ret != 0))
1170                 return ret;
1171
1172         vmw_dmabuf_unreference(&vmw_bo);
1173         return 0;
1174 }
1175
1176 /**
1177  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
1178  *
1179  * @dev_priv: Pointer to a device private struct.
1180  * @sw_context: The software context used for this command submission.
1181  * @header: Pointer to the command header in the command stream.
1182  */
1183 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1184                               struct vmw_sw_context *sw_context,
1185                               SVGA3dCmdHeader *header)
1186 {
1187         struct vmw_dma_buffer *vmw_bo;
1188         struct vmw_query_cmd {
1189                 SVGA3dCmdHeader header;
1190                 SVGA3dCmdWaitForQuery q;
1191         } *cmd;
1192         int ret;
1193
1194         cmd = container_of(header, struct vmw_query_cmd, header);
1195         if (dev_priv->has_mob) {
1196                 struct {
1197                         SVGA3dCmdHeader header;
1198                         SVGA3dCmdWaitForGBQuery q;
1199                 } gb_cmd;
1200
1201                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1202
1203                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1204                 gb_cmd.header.size = cmd->header.size;
1205                 gb_cmd.q.cid = cmd->q.cid;
1206                 gb_cmd.q.type = cmd->q.type;
1207                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1208                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1209
1210                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1211                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1212         }
1213
1214         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1215         if (unlikely(ret != 0))
1216                 return ret;
1217
1218         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1219                                       &cmd->q.guestResult,
1220                                       &vmw_bo);
1221         if (unlikely(ret != 0))
1222                 return ret;
1223
1224         vmw_dmabuf_unreference(&vmw_bo);
1225         return 0;
1226 }
1227
1228 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1229                        struct vmw_sw_context *sw_context,
1230                        SVGA3dCmdHeader *header)
1231 {
1232         struct vmw_dma_buffer *vmw_bo = NULL;
1233         struct vmw_surface *srf = NULL;
1234         struct vmw_dma_cmd {
1235                 SVGA3dCmdHeader header;
1236                 SVGA3dCmdSurfaceDMA dma;
1237         } *cmd;
1238         int ret;
1239         SVGA3dCmdSurfaceDMASuffix *suffix;
1240         uint32_t bo_size;
1241
1242         cmd = container_of(header, struct vmw_dma_cmd, header);
1243         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1244                                                header->size - sizeof(*suffix));
1245
1246         /* Make sure the device and the verifier stay in sync. */
1247         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1248                 DRM_ERROR("Invalid DMA suffix size.\n");
1249                 return -EINVAL;
1250         }
1251
1252         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1253                                       &cmd->dma.guest.ptr,
1254                                       &vmw_bo);
1255         if (unlikely(ret != 0))
1256                 return ret;
1257
1258         /* Make sure DMA doesn't cross BO boundaries. */
1259         bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1260         if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1261                 DRM_ERROR("Invalid DMA offset.\n");
1262                 return -EINVAL;
1263         }
1264
1265         bo_size -= cmd->dma.guest.ptr.offset;
1266         if (unlikely(suffix->maximumOffset > bo_size))
1267                 suffix->maximumOffset = bo_size;
1268
1269         if (sw_context->quirks & VMW_QUIRK_DST_SID_OK)
1270                 goto out_no_surface;
1271
1272         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1273                                 user_surface_converter, &cmd->dma.host.sid,
1274                                 NULL);
1275         if (unlikely(ret != 0)) {
1276                 if (unlikely(ret != -ERESTARTSYS))
1277                         DRM_ERROR("could not find surface for DMA.\n");
1278                 goto out_no_surface;
1279         }
1280
1281         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1282
1283         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1284                              header);
1285
1286 out_no_surface:
1287         vmw_dmabuf_unreference(&vmw_bo);
1288         return ret;
1289 }
1290
1291 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1292                         struct vmw_sw_context *sw_context,
1293                         SVGA3dCmdHeader *header)
1294 {
1295         struct vmw_draw_cmd {
1296                 SVGA3dCmdHeader header;
1297                 SVGA3dCmdDrawPrimitives body;
1298         } *cmd;
1299         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1300                 (unsigned long)header + sizeof(*cmd));
1301         SVGA3dPrimitiveRange *range;
1302         uint32_t i;
1303         uint32_t maxnum;
1304         int ret;
1305
1306         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1307         if (unlikely(ret != 0))
1308                 return ret;
1309
1310         cmd = container_of(header, struct vmw_draw_cmd, header);
1311         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1312
1313         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1314                 DRM_ERROR("Illegal number of vertex declarations.\n");
1315                 return -EINVAL;
1316         }
1317
1318         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1319                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1320                                         user_surface_converter,
1321                                         &decl->array.surfaceId, NULL);
1322                 if (unlikely(ret != 0))
1323                         return ret;
1324         }
1325
1326         maxnum = (header->size - sizeof(cmd->body) -
1327                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1328         if (unlikely(cmd->body.numRanges > maxnum)) {
1329                 DRM_ERROR("Illegal number of index ranges.\n");
1330                 return -EINVAL;
1331         }
1332
1333         range = (SVGA3dPrimitiveRange *) decl;
1334         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1335                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1336                                         user_surface_converter,
1337                                         &range->indexArray.surfaceId, NULL);
1338                 if (unlikely(ret != 0))
1339                         return ret;
1340         }
1341         return 0;
1342 }
1343
1344
1345 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1346                              struct vmw_sw_context *sw_context,
1347                              SVGA3dCmdHeader *header)
1348 {
1349         struct vmw_tex_state_cmd {
1350                 SVGA3dCmdHeader header;
1351                 SVGA3dCmdSetTextureState state;
1352         } *cmd;
1353
1354         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1355           ((unsigned long) header + header->size + sizeof(header));
1356         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1357                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1358         struct vmw_resource_val_node *ctx_node;
1359         struct vmw_resource_val_node *res_node;
1360         int ret;
1361
1362         cmd = container_of(header, struct vmw_tex_state_cmd,
1363                            header);
1364
1365         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1366                                 user_context_converter, &cmd->state.cid,
1367                                 &ctx_node);
1368         if (unlikely(ret != 0))
1369                 return ret;
1370
1371         for (; cur_state < last_state; ++cur_state) {
1372                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1373                         continue;
1374
1375                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1376                                         user_surface_converter,
1377                                         &cur_state->value, &res_node);
1378                 if (unlikely(ret != 0))
1379                         return ret;
1380
1381                 if (dev_priv->has_mob) {
1382                         struct vmw_ctx_bindinfo bi;
1383
1384                         bi.ctx = ctx_node->res;
1385                         bi.res = res_node ? res_node->res : NULL;
1386                         bi.bt = vmw_ctx_binding_tex;
1387                         bi.i1.texture_stage = cur_state->stage;
1388                         vmw_context_binding_add(ctx_node->staged_bindings,
1389                                                 &bi);
1390                 }
1391         }
1392
1393         return 0;
1394 }
1395
1396 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1397                                       struct vmw_sw_context *sw_context,
1398                                       void *buf)
1399 {
1400         struct vmw_dma_buffer *vmw_bo;
1401         int ret;
1402
1403         struct {
1404                 uint32_t header;
1405                 SVGAFifoCmdDefineGMRFB body;
1406         } *cmd = buf;
1407
1408         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1409                                       &cmd->body.ptr,
1410                                       &vmw_bo);
1411         if (unlikely(ret != 0))
1412                 return ret;
1413
1414         vmw_dmabuf_unreference(&vmw_bo);
1415
1416         return ret;
1417 }
1418
1419 /**
1420  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1421  *
1422  * @dev_priv: Pointer to a device private struct.
1423  * @sw_context: The software context being used for this batch.
1424  * @res_type: The resource type.
1425  * @converter: Information about user-space binding for this resource type.
1426  * @res_id: Pointer to the user-space resource handle in the command stream.
1427  * @buf_id: Pointer to the user-space backup buffer handle in the command
1428  * stream.
1429  * @backup_offset: Offset of backup into MOB.
1430  *
1431  * This function prepares for registering a switch of backup buffers
1432  * in the resource metadata just prior to unreserving.
1433  */
1434 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1435                                  struct vmw_sw_context *sw_context,
1436                                  enum vmw_res_type res_type,
1437                                  const struct vmw_user_resource_conv
1438                                  *converter,
1439                                  uint32_t *res_id,
1440                                  uint32_t *buf_id,
1441                                  unsigned long backup_offset)
1442 {
1443         int ret;
1444         struct vmw_dma_buffer *dma_buf;
1445         struct vmw_resource_val_node *val_node;
1446
1447         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1448                                 converter, res_id, &val_node);
1449         if (unlikely(ret != 0))
1450                 return ret;
1451
1452         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1453         if (unlikely(ret != 0))
1454                 return ret;
1455
1456         if (val_node->first_usage)
1457                 val_node->no_buffer_needed = true;
1458
1459         vmw_dmabuf_unreference(&val_node->new_backup);
1460         val_node->new_backup = dma_buf;
1461         val_node->new_backup_offset = backup_offset;
1462
1463         return 0;
1464 }
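
/*
 * A minimal usage sketch for the helper above (vmw_cmd_bind_gb_surface()
 * and vmw_cmd_bind_gb_shader() below are the real callers): a bind-style
 * handler only points at the resource id and mob id in its command body and
 * forwards the backup offset, e.g.
 *
 *	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
 *				     user_surface_converter,
 *				     &cmd->body.sid, &cmd->body.mobid, 0);
 *
 * The backup pointer in the resource itself is only switched when the
 * resource is unreserved after validation, as noted in the comment above.
 */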
1465
1466 /**
1467  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1468  * command
1469  *
1470  * @dev_priv: Pointer to a device private struct.
1471  * @sw_context: The software context being used for this batch.
1472  * @header: Pointer to the command header in the command stream.
1473  */
1474 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1475                                    struct vmw_sw_context *sw_context,
1476                                    SVGA3dCmdHeader *header)
1477 {
1478         struct vmw_bind_gb_surface_cmd {
1479                 SVGA3dCmdHeader header;
1480                 SVGA3dCmdBindGBSurface body;
1481         } *cmd;
1482
1483         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1484
1485         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1486                                      user_surface_converter,
1487                                      &cmd->body.sid, &cmd->body.mobid,
1488                                      0);
1489 }
1490
1491 /**
1492  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1493  * command
1494  *
1495  * @dev_priv: Pointer to a device private struct.
1496  * @sw_context: The software context being used for this batch.
1497  * @header: Pointer to the command header in the command stream.
1498  */
1499 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1500                                    struct vmw_sw_context *sw_context,
1501                                    SVGA3dCmdHeader *header)
1502 {
1503         struct vmw_gb_surface_cmd {
1504                 SVGA3dCmdHeader header;
1505                 SVGA3dCmdUpdateGBImage body;
1506         } *cmd;
1507
1508         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1509
1510         if (sw_context->quirks & VMW_QUIRK_SRC_SID_OK)
1511                 return 0;
1512
1513         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1514                                  user_surface_converter,
1515                                  &cmd->body.image.sid, NULL);
1516 }
1517
1518 /**
1519  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1520  * command
1521  *
1522  * @dev_priv: Pointer to a device private struct.
1523  * @sw_context: The software context being used for this batch.
1524  * @header: Pointer to the command header in the command stream.
1525  */
1526 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1527                                      struct vmw_sw_context *sw_context,
1528                                      SVGA3dCmdHeader *header)
1529 {
1530         struct vmw_gb_surface_cmd {
1531                 SVGA3dCmdHeader header;
1532                 SVGA3dCmdUpdateGBSurface body;
1533         } *cmd;
1534
1535         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1536
1537         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1538                                  user_surface_converter,
1539                                  &cmd->body.sid, NULL);
1540 }
1541
1542 /**
1543  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1544  * command
1545  *
1546  * @dev_priv: Pointer to a device private struct.
1547  * @sw_context: The software context being used for this batch.
1548  * @header: Pointer to the command header in the command stream.
1549  */
1550 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1551                                      struct vmw_sw_context *sw_context,
1552                                      SVGA3dCmdHeader *header)
1553 {
1554         struct vmw_gb_surface_cmd {
1555                 SVGA3dCmdHeader header;
1556                 SVGA3dCmdReadbackGBImage body;
1557         } *cmd;
1558
1559         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1560
1561         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1562                                  user_surface_converter,
1563                                  &cmd->body.image.sid, NULL);
1564 }
1565
1566 /**
1567  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1568  * command
1569  *
1570  * @dev_priv: Pointer to a device private struct.
1571  * @sw_context: The software context being used for this batch.
1572  * @header: Pointer to the command header in the command stream.
1573  */
1574 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1575                                        struct vmw_sw_context *sw_context,
1576                                        SVGA3dCmdHeader *header)
1577 {
1578         struct vmw_gb_surface_cmd {
1579                 SVGA3dCmdHeader header;
1580                 SVGA3dCmdReadbackGBSurface body;
1581         } *cmd;
1582
1583         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1584
1585         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1586                                  user_surface_converter,
1587                                  &cmd->body.sid, NULL);
1588 }
1589
1590 /**
1591  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1592  * command
1593  *
1594  * @dev_priv: Pointer to a device private struct.
1595  * @sw_context: The software context being used for this batch.
1596  * @header: Pointer to the command header in the command stream.
1597  */
1598 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1599                                        struct vmw_sw_context *sw_context,
1600                                        SVGA3dCmdHeader *header)
1601 {
1602         struct vmw_gb_surface_cmd {
1603                 SVGA3dCmdHeader header;
1604                 SVGA3dCmdInvalidateGBImage body;
1605         } *cmd;
1606
1607         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1608
1609         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1610                                  user_surface_converter,
1611                                  &cmd->body.image.sid, NULL);
1612 }
1613
1614 /**
1615  * vmw_cmd_invalidate_gb_surface - Validate an
1616  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1617  *
1618  * @dev_priv: Pointer to a device private struct.
1619  * @sw_context: The software context being used for this batch.
1620  * @header: Pointer to the command header in the command stream.
1621  */
1622 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1623                                          struct vmw_sw_context *sw_context,
1624                                          SVGA3dCmdHeader *header)
1625 {
1626         struct vmw_gb_surface_cmd {
1627                 SVGA3dCmdHeader header;
1628                 SVGA3dCmdInvalidateGBSurface body;
1629         } *cmd;
1630
1631         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1632
1633         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1634                                  user_surface_converter,
1635                                  &cmd->body.sid, NULL);
1636 }
1637
1638
1639 /**
1640  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1641  * command
1642  *
1643  * @dev_priv: Pointer to a device private struct.
1644  * @sw_context: The software context being used for this batch.
1645  * @header: Pointer to the command header in the command stream.
1646  */
1647 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1648                                  struct vmw_sw_context *sw_context,
1649                                  SVGA3dCmdHeader *header)
1650 {
1651         struct vmw_shader_define_cmd {
1652                 SVGA3dCmdHeader header;
1653                 SVGA3dCmdDefineShader body;
1654         } *cmd;
1655         int ret;
1656         size_t size;
1657         struct vmw_resource_val_node *val;
1658
1659         cmd = container_of(header, struct vmw_shader_define_cmd,
1660                            header);
1661
1662         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1663                                 user_context_converter, &cmd->body.cid,
1664                                 &val);
1665         if (unlikely(ret != 0))
1666                 return ret;
1667
1668         if (unlikely(!dev_priv->has_mob))
1669                 return 0;
1670
1671         size = cmd->header.size - sizeof(cmd->body);
1672         ret = vmw_compat_shader_add(dev_priv,
1673                                     vmw_context_res_man(val->res),
1674                                     cmd->body.shid, cmd + 1,
1675                                     cmd->body.type, size,
1676                                     &sw_context->staged_cmd_res);
1677         if (unlikely(ret != 0))
1678                 return ret;
1679
1680         return vmw_resource_relocation_add(&sw_context->res_relocations,
1681                                            NULL, &cmd->header.id -
1682                                            sw_context->buf_start);
1685 }
1686
1687 /**
1688  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1689  * command
1690  *
1691  * @dev_priv: Pointer to a device private struct.
1692  * @sw_context: The software context being used for this batch.
1693  * @header: Pointer to the command header in the command stream.
1694  */
1695 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1696                                   struct vmw_sw_context *sw_context,
1697                                   SVGA3dCmdHeader *header)
1698 {
1699         struct vmw_shader_destroy_cmd {
1700                 SVGA3dCmdHeader header;
1701                 SVGA3dCmdDestroyShader body;
1702         } *cmd;
1703         int ret;
1704         struct vmw_resource_val_node *val;
1705
1706         cmd = container_of(header, struct vmw_shader_destroy_cmd,
1707                            header);
1708
1709         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1710                                 user_context_converter, &cmd->body.cid,
1711                                 &val);
1712         if (unlikely(ret != 0))
1713                 return ret;
1714
1715         if (unlikely(!dev_priv->has_mob))
1716                 return 0;
1717
1718         ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1719                                        cmd->body.shid,
1720                                        cmd->body.type,
1721                                        &sw_context->staged_cmd_res);
1722         if (unlikely(ret != 0))
1723                 return ret;
1724
1725         return vmw_resource_relocation_add(&sw_context->res_relocations,
1726                                            NULL, &cmd->header.id -
1727                                            sw_context->buf_start);
1730 }
1731
1732 /**
1733  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1734  * command
1735  *
1736  * @dev_priv: Pointer to a device private struct.
1737  * @sw_context: The software context being used for this batch.
1738  * @header: Pointer to the command header in the command stream.
1739  */
1740 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1741                               struct vmw_sw_context *sw_context,
1742                               SVGA3dCmdHeader *header)
1743 {
1744         struct vmw_set_shader_cmd {
1745                 SVGA3dCmdHeader header;
1746                 SVGA3dCmdSetShader body;
1747         } *cmd;
1748         struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1749         struct vmw_ctx_bindinfo bi;
1750         struct vmw_resource *res = NULL;
1751         int ret;
1752
1753         cmd = container_of(header, struct vmw_set_shader_cmd,
1754                            header);
1755
1756         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1757                                 user_context_converter, &cmd->body.cid,
1758                                 &ctx_node);
1759         if (unlikely(ret != 0))
1760                 return ret;
1761
1762         if (!dev_priv->has_mob)
1763                 return 0;
1764
1765         if (cmd->body.shid != SVGA3D_INVALID_ID) {
1766                 res = vmw_compat_shader_lookup
1767                         (vmw_context_res_man(ctx_node->res),
1768                          cmd->body.shid,
1769                          cmd->body.type);
1770
1771                 if (!IS_ERR(res)) {
1772                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1773                                                     vmw_res_shader,
1774                                                     &cmd->body.shid, res,
1775                                                     &res_node);
1776                         vmw_resource_unreference(&res);
1777                         if (unlikely(ret != 0))
1778                                 return ret;
1779                 }
1780         }
1781
1782         if (!res_node) {
1783                 ret = vmw_cmd_res_check(dev_priv, sw_context,
1784                                         vmw_res_shader,
1785                                         user_shader_converter,
1786                                         &cmd->body.shid, &res_node);
1787                 if (unlikely(ret != 0))
1788                         return ret;
1789         }
1790
1791         bi.ctx = ctx_node->res;
1792         bi.res = res_node ? res_node->res : NULL;
1793         bi.bt = vmw_ctx_binding_shader;
1794         bi.i1.shader_type = cmd->body.type;
1795         return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1796 }
1797
1798 /**
1799  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1800  * command
1801  *
1802  * @dev_priv: Pointer to a device private struct.
1803  * @sw_context: The software context being used for this batch.
1804  * @header: Pointer to the command header in the command stream.
1805  */
1806 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1807                                     struct vmw_sw_context *sw_context,
1808                                     SVGA3dCmdHeader *header)
1809 {
1810         struct vmw_set_shader_const_cmd {
1811                 SVGA3dCmdHeader header;
1812                 SVGA3dCmdSetShaderConst body;
1813         } *cmd;
1814         int ret;
1815
1816         cmd = container_of(header, struct vmw_set_shader_const_cmd,
1817                            header);
1818
1819         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1820                                 user_context_converter, &cmd->body.cid,
1821                                 NULL);
1822         if (unlikely(ret != 0))
1823                 return ret;
1824
1825         if (dev_priv->has_mob)
1826                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1827
1828         return 0;
1829 }
1830
1831 /**
1832  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1833  * command
1834  *
1835  * @dev_priv: Pointer to a device private struct.
1836  * @sw_context: The software context being used for this batch.
1837  * @header: Pointer to the command header in the command stream.
1838  */
1839 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1840                                   struct vmw_sw_context *sw_context,
1841                                   SVGA3dCmdHeader *header)
1842 {
1843         struct vmw_bind_gb_shader_cmd {
1844                 SVGA3dCmdHeader header;
1845                 SVGA3dCmdBindGBShader body;
1846         } *cmd;
1847
1848         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1849                            header);
1850
1851         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1852                                      user_shader_converter,
1853                                      &cmd->body.shid, &cmd->body.mobid,
1854                                      cmd->body.offsetInBytes);
1855 }
1856
1857 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1858                                 struct vmw_sw_context *sw_context,
1859                                 void *buf, uint32_t *size)
1860 {
1861         uint32_t size_remaining = *size;
1862         uint32_t cmd_id;
1863
1864         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1865         switch (cmd_id) {
1866         case SVGA_CMD_UPDATE:
1867                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1868                 break;
1869         case SVGA_CMD_DEFINE_GMRFB:
1870                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1871                 break;
1872         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1873                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1874                 break;
1875         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1876                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1877                 break;
1878         default:
1879                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1880                 return -EINVAL;
1881         }
1882
1883         if (*size > size_remaining) {
1884                 DRM_ERROR("Invalid SVGA command (size mismatch):"
1885                           " %u.\n", cmd_id);
1886                 return -EINVAL;
1887         }
1888
1889         if (unlikely(!sw_context->kernel)) {
1890                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1891                 return -EPERM;
1892         }
1893
1894         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1895                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1896
1897         return 0;
1898 }
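
/*
 * Non-3D SVGA commands have no SVGA3dCmdHeader: they are a single 32-bit
 * command id immediately followed by a fixed-size body, which is why the
 * sizes above are computed as sizeof(uint32_t) + sizeof(body). The same
 * pattern as vmw_cmd_check_define_gmrfb() applies, sketched here for
 * SVGA_CMD_UPDATE (the body field names are illustrative):
 *
 *	struct {
 *		uint32_t header;          - SVGA_CMD_UPDATE
 *		SVGAFifoCmdUpdate body;   - x, y, width, height
 *	} *cmd = buf;
 */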
1899
1900 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1901         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1902                     false, false, false),
1903         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1904                     false, false, false),
1905         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1906                     true, false, false),
1907         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1908                     true, false, false),
1909         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1910                     true, false, false),
1911         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1912                     false, false, false),
1913         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1914                     false, false, false),
1915         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1916                     true, false, false),
1917         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1918                     true, false, false),
1919         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1920                     true, false, false),
1921         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1922                     &vmw_cmd_set_render_target_check, true, false, false),
1923         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1924                     true, false, false),
1925         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1926                     true, false, false),
1927         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1928                     true, false, false),
1929         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1930                     true, false, false),
1931         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1932                     true, false, false),
1933         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1934                     true, false, false),
1935         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1936                     true, false, false),
1937         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1938                     false, false, false),
1939         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1940                     true, false, false),
1941         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1942                     true, false, false),
1943         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1944                     true, false, false),
1945         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1946                     true, false, false),
1947         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1948                     true, false, false),
1949         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1950                     true, false, false),
1951         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1952                     true, false, false),
1953         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1954                     true, false, false),
1955         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1956                     true, false, false),
1957         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1958                     true, false, false),
1959         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1960                     &vmw_cmd_blt_surf_screen_check, false, false, false),
1961         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1962                     false, false, false),
1963         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1964                     false, false, false),
1965         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1966                     false, false, false),
1967         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1968                     false, false, false),
1969         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1970                     false, false, false),
1971         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1972                     false, false, false),
1973         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1974                     false, false, false),
1975         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1976                     false, false, false),
1977         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1978                     false, false, false),
1979         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1980                     false, false, false),
1981         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1982                     false, false, false),
1983         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1984                     false, false, false),
1985         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1986                     false, false, false),
1987         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1988                     false, false, true),
1989         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1990                     false, false, true),
1991         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1992                     false, false, true),
1993         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1994                     false, false, true),
1995         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1996                     false, false, true),
1997         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1998                     false, false, true),
1999         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2000                     false, false, true),
2001         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2002                     false, false, true),
2003         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2004                     true, false, true),
2005         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2006                     false, false, true),
2007         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2008                     true, false, true),
2009         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2010                     &vmw_cmd_update_gb_surface, true, false, true),
2011         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2012                     &vmw_cmd_readback_gb_image, true, false, true),
2013         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2014                     &vmw_cmd_readback_gb_surface, true, false, true),
2015         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2016                     &vmw_cmd_invalidate_gb_image, true, false, true),
2017         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2018                     &vmw_cmd_invalidate_gb_surface, true, false, true),
2019         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2020                     false, false, true),
2021         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2022                     false, false, true),
2023         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2024                     false, false, true),
2025         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2026                     false, false, true),
2027         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2028                     false, false, true),
2029         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2030                     false, false, true),
2031         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2032                     true, false, true),
2033         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2034                     false, false, true),
2035         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2036                     false, false, false),
2037         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2038                     true, false, true),
2039         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2040                     true, false, true),
2041         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2042                     true, false, true),
2043         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2044                     true, false, true),
2045         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2046                     false, false, true),
2047         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2048                     false, false, true),
2049         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2050                     false, false, true),
2051         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2052                     false, false, true),
2053         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2054                     false, false, true),
2055         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2056                     false, false, true),
2057         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2058                     false, false, true),
2059         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2060                     false, false, true),
2061         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2062                     false, false, true),
2063         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2064                     false, false, true),
2065         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2066                     true, false, true)
2067 };
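
/*
 * How an entry in the table above is resolved, as a sketch of the checks in
 * vmw_cmd_check() below (the mapping of the VMW_CMD_DEF() arguments to
 * struct members is inferred from those checks):
 *
 *	cmd_id -= SVGA_3D_CMD_BASE;
 *	entry = &vmw_cmd_entries[cmd_id];
 *	if (!entry->user_allow && !sw_context->kernel)
 *		- reject: privileged command submitted from user space
 *	if (entry->gb_disable && gb)
 *		- reject: legacy command on guest-backed capable hardware
 *	if (entry->gb_enable && !gb)
 *		- reject: guest-backed command without hardware support
 */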
2068
2069 static int vmw_cmd_check(struct vmw_private *dev_priv,
2070                          struct vmw_sw_context *sw_context,
2071                          void *buf, uint32_t *size)
2072 {
2073         uint32_t cmd_id;
2074         uint32_t size_remaining = *size;
2075         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2076         int ret;
2077         const struct vmw_cmd_entry *entry;
2078         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2079
2080         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2081         /* Handle any non-3D commands */
2082         if (unlikely(cmd_id < SVGA_CMD_MAX))
2083                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2084
2086         cmd_id = le32_to_cpu(header->id);
2087         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2088
2089         cmd_id -= SVGA_3D_CMD_BASE;
2090         if (unlikely(*size > size_remaining))
2091                 goto out_invalid;
2092
2093         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2094                 goto out_invalid;
2095
2096         entry = &vmw_cmd_entries[cmd_id];
2097         if (unlikely(!entry->func))
2098                 goto out_invalid;
2099
2100         if (unlikely(!entry->user_allow && !sw_context->kernel))
2101                 goto out_privileged;
2102
2103         if (unlikely(entry->gb_disable && gb))
2104                 goto out_old;
2105
2106         if (unlikely(entry->gb_enable && !gb))
2107                 goto out_new;
2108
2109         ret = entry->func(dev_priv, sw_context, header);
2110         if (unlikely(ret != 0))
2111                 goto out_invalid;
2112
2113         return 0;
2114 out_invalid:
2115         DRM_ERROR("Invalid SVGA3D command: %d\n",
2116                   cmd_id + SVGA_3D_CMD_BASE);
2117         return -EINVAL;
2118 out_privileged:
2119         DRM_ERROR("Privileged SVGA3D command: %d\n",
2120                   cmd_id + SVGA_3D_CMD_BASE);
2121         return -EPERM;
2122 out_old:
2123         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2124                   cmd_id + SVGA_3D_CMD_BASE);
2125         return -EINVAL;
2126 out_new:
2127         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2128                   cmd_id + SVGA_3D_CMD_BASE);
2129         return -EINVAL;
2130 }
2131
2132 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2133                              struct vmw_sw_context *sw_context,
2134                              void *buf,
2135                              uint32_t size)
2136 {
2137         int32_t cur_size = size;
2138         int ret;
2139
2140         sw_context->buf_start = buf;
2141
2142         while (cur_size > 0) {
2143                 size = cur_size;
2144                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2145                 if (unlikely(ret != 0))
2146                         return ret;
2147                 buf = (void *)((unsigned long) buf + size);
2148                 cur_size -= size;
2149         }
2150
2151         if (unlikely(cur_size != 0)) {
2152                 DRM_ERROR("Command verifier out of sync.\n");
2153                 return -EINVAL;
2154         }
2155
2156         return 0;
2157 }
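
/*
 * Walk invariant for vmw_cmd_check_all(): each iteration vmw_cmd_check()
 * rewrites size to the exact length of the command it just verified, buf
 * advances by that amount and cur_size shrinks by the same amount. With
 * illustrative numbers, a 40-byte batch made of a 24-byte command followed
 * by a 16-byte command leaves cur_size at 16 after the first pass and at 0
 * after the second; the final cur_size check is a defensive consistency
 * check on that invariant.
 */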
2158
2159 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2160 {
2161         sw_context->cur_reloc = 0;
2162 }
2163
2164 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2165 {
2166         uint32_t i;
2167         struct vmw_relocation *reloc;
2168         struct ttm_validate_buffer *validate;
2169         struct ttm_buffer_object *bo;
2170
2171         for (i = 0; i < sw_context->cur_reloc; ++i) {
2172                 reloc = &sw_context->relocs[i];
2173                 validate = &sw_context->val_bufs[reloc->index].base;
2174                 bo = validate->bo;
2175                 switch (bo->mem.mem_type) {
2176                 case TTM_PL_VRAM:
2177                         reloc->location->offset += bo->offset;
2178                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2179                         break;
2180                 case VMW_PL_GMR:
2181                         reloc->location->gmrId = bo->mem.start;
2182                         break;
2183                 case VMW_PL_MOB:
2184                         *reloc->mob_loc = bo->mem.start;
2185                         break;
2186                 default:
2187                         BUG();
2188                 }
2189         }
2190         vmw_free_relocations(sw_context);
2191 }
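
/*
 * Patch example for the switch above (illustrative values): a guest pointer
 * that user space filled with a buffer-object handle and offset 0x1000 is
 * rewritten to
 *
 *	{ gmrId = SVGA_GMR_FRAMEBUFFER, offset = 0x1000 + bo->offset }
 *
 * when the buffer validated into VRAM, or to
 *
 *	{ gmrId = bo->mem.start, offset = 0x1000 }
 *
 * when it validated into a GMR. MOB-bound commands carry a plain mob id
 * instead, which is patched in place through reloc->mob_loc.
 */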
2192
2193 /**
2194  * vmw_resource_list_unreference - Free up a resource list and unreference
2195  * all resources referenced by it.
2196  *
2197  * @list: The resource list.
2198  */
2199 static void vmw_resource_list_unreference(struct list_head *list)
2200 {
2201         struct vmw_resource_val_node *val, *val_next;
2202
2203         /*
2204          * Drop references to resources held during command submission.
2205          */
2206
2207         list_for_each_entry_safe(val, val_next, list, head) {
2208                 list_del_init(&val->head);
2209                 vmw_resource_unreference(&val->res);
2210                 if (unlikely(val->staged_bindings))
2211                         kfree(val->staged_bindings);
2212                 kfree(val);
2213         }
2214 }
2215
2216 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2217 {
2218         struct vmw_validate_buffer *entry, *next;
2219         struct vmw_resource_val_node *val;
2220
2221         /*
2222          * Drop references to DMA buffers held during command submission.
2223          */
2224         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2225                                  base.head) {
2226                 list_del(&entry->base.head);
2227                 ttm_bo_unref(&entry->base.bo);
2228                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2229                 sw_context->cur_val_buf--;
2230         }
2231         BUG_ON(sw_context->cur_val_buf != 0);
2232
2233         list_for_each_entry(val, &sw_context->resource_list, head)
2234                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2235 }
2236
2237 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2238                                       struct ttm_buffer_object *bo,
2239                                       bool validate_as_mob)
2240 {
2241         struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
2242                                                   base);
2243         int ret;
2244
2245         if (vbo->pin_count > 0)
2246                 return 0;
2247
2248         if (validate_as_mob)
2249                 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2250
2251         /*
2252          * Put BO in VRAM if there is space, otherwise as a GMR.
2253          * If there is no space in VRAM and GMR ids are all used up,
2254          * start evicting GMRs to make room. If the DMA buffer can't be
2255          * used as a GMR, this will return -ENOMEM.
2256          */
2257
2258         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2259         if (likely(ret == 0 || ret == -ERESTARTSYS))
2260                 return ret;
2261
2262         /*
2263          * If that failed, try VRAM again, this time evicting
2264          * previous contents.
2265          */
2266
2267         DRM_INFO("Falling through to VRAM.\n");
2268         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2269         return ret;
2270 }
2271
2272 static int vmw_validate_buffers(struct vmw_private *dev_priv,
2273                                 struct vmw_sw_context *sw_context)
2274 {
2275         struct vmw_validate_buffer *entry;
2276         int ret;
2277
2278         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2279                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2280                                                  entry->validate_as_mob);
2281                 if (unlikely(ret != 0))
2282                         return ret;
2283         }
2284         return 0;
2285 }
2286
2287 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2288                                  uint32_t size)
2289 {
2290         if (likely(sw_context->cmd_bounce_size >= size))
2291                 return 0;
2292
2293         if (sw_context->cmd_bounce_size == 0)
2294                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2295
2296         while (sw_context->cmd_bounce_size < size) {
2297                 sw_context->cmd_bounce_size =
2298                         PAGE_ALIGN(sw_context->cmd_bounce_size +
2299                                    (sw_context->cmd_bounce_size >> 1));
2300         }
2301
2302         if (sw_context->cmd_bounce != NULL)
2303                 vfree(sw_context->cmd_bounce);
2304
2305         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2306
2307         if (sw_context->cmd_bounce == NULL) {
2308                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
2309                 sw_context->cmd_bounce_size = 0;
2310                 return -ENOMEM;
2311         }
2312
2313         return 0;
2314 }
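
/*
 * Growth example for the loop above, assuming 4 KiB pages (the starting
 * value comes from VMWGFX_CMD_BOUNCE_INIT_SIZE, defined elsewhere): with a
 * current bounce size of 128 KiB and a 300 KiB request, the size steps
 * through PAGE_ALIGN(128K + 64K) = 192 KiB, then 288 KiB, then 432 KiB, at
 * which point the request fits. The old buffer is freed rather than copied,
 * which matches how vmw_execbuf_process() below refills the bounce buffer
 * with a fresh copy_from_user() right after resizing it.
 */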
2315
2316 /**
2317  * vmw_execbuf_fence_commands - create and submit a command stream fence
2318  *
2319  * Creates a fence object and submits a command stream marker.
2320  * If this fails for some reason, we sync the fifo and return NULL.
2321  * It is then safe to fence buffers with a NULL pointer.
2322  *
2323  * A user-space handle is created only if @p_handle is non-NULL, in which
2324  * case @file_priv must also be non-NULL.
2325  */
2326
2327 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2328                                struct vmw_private *dev_priv,
2329                                struct vmw_fence_obj **p_fence,
2330                                uint32_t *p_handle)
2331 {
2332         uint32_t sequence;
2333         int ret;
2334         bool synced = false;
2335
2336         /* p_handle implies file_priv. */
2337         BUG_ON(p_handle != NULL && file_priv == NULL);
2338
2339         ret = vmw_fifo_send_fence(dev_priv, &sequence);
2340         if (unlikely(ret != 0)) {
2341                 DRM_ERROR("Fence submission error. Syncing.\n");
2342                 synced = true;
2343         }
2344
2345         if (p_handle != NULL)
2346                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2347                                             sequence, p_fence, p_handle);
2348         else
2349                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
2350
2351         if (unlikely(ret != 0 && !synced)) {
2352                 (void) vmw_fallback_wait(dev_priv, false, false,
2353                                          sequence, false,
2354                                          VMW_FENCE_WAIT_TIMEOUT);
2355                 *p_fence = NULL;
2356         }
2357
2358         return 0;
2359 }
2360
2361 /**
2362  * vmw_execbuf_copy_fence_user - copy fence object information to
2363  * user-space.
2364  *
2365  * @dev_priv: Pointer to a vmw_private struct.
2366  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2367  * @ret: Return value from fence object creation.
2368  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2369  * which the information should be copied.
2370  * @fence: Pointer to the fence object.
2371  * @fence_handle: User-space fence handle.
2372  *
2373  * This function copies fence information to user-space. If copying fails,
2374  * the user-space struct drm_vmw_fence_rep::error member is hopefully left
2375  * untouched; if user-space has preloaded it with -EFAULT, the failure can
2376  * still be detected.
2377  * Also, if copying fails, user-space will be unable to signal the fence
2378  * object, so we wait for it immediately and then unreference the
2379  * user-space reference.
2380  */
2381 void
2382 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2383                             struct vmw_fpriv *vmw_fp,
2384                             int ret,
2385                             struct drm_vmw_fence_rep __user *user_fence_rep,
2386                             struct vmw_fence_obj *fence,
2387                             uint32_t fence_handle)
2388 {
2389         struct drm_vmw_fence_rep fence_rep;
2390
2391         if (user_fence_rep == NULL)
2392                 return;
2393
2394         memset(&fence_rep, 0, sizeof(fence_rep));
2395
2396         fence_rep.error = ret;
2397         if (ret == 0) {
2398                 BUG_ON(fence == NULL);
2399
2400                 fence_rep.handle = fence_handle;
2401                 fence_rep.seqno = fence->base.seqno;
2402                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2403                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2404         }
2405
2406         /*
2407          * copy_to_user errors will be detected by user space not
2408          * seeing fence_rep::error filled in. Typically
2409          * user-space would have pre-set that member to -EFAULT.
2410          */
2411         ret = copy_to_user(user_fence_rep, &fence_rep,
2412                            sizeof(fence_rep));
2413
2414         /*
2415          * User-space lost the fence object. We need to sync
2416          * and unreference the handle.
2417          */
2418         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2419                 ttm_ref_object_base_unref(vmw_fp->tfile,
2420                                           fence_handle, TTM_REF_USAGE);
2421                 DRM_ERROR("Fence copy error. Syncing.\n");
2422                 (void) vmw_fence_obj_wait(fence, false, false,
2423                                           VMW_FENCE_WAIT_TIMEOUT);
2424         }
2425 }
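
/*
 * The user-space side of the convention described above, as an illustrative
 * sketch (only the struct name and its error member are taken from this
 * driver's uapi; the exact call sequence is up to the client):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	- point the execbuf argument's fence_rep field at &rep, issue the ioctl
 *	if (rep.error == -EFAULT)
 *		- the kernel could not copy the fence info back; it has already
 *		- released the user-space handle and synced the fence
 */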
2426
2427 /**
2428  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
2429  * the fifo.
2430  *
2431  * @dev_priv: Pointer to a device private structure.
2432  * @kernel_commands: Pointer to the unpatched command batch.
2433  * @command_size: Size of the unpatched command batch.
2434  * @sw_context: Structure holding the relocation lists.
2435  *
2436  * Side effects: If this function returns 0, then the command batch
2437  * pointed to by @kernel_commands will have been modified.
2438  */
2439 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
2440                                    void *kernel_commands,
2441                                    u32 command_size,
2442                                    struct vmw_sw_context *sw_context)
2443 {
2444         void *cmd = vmw_fifo_reserve(dev_priv, command_size);
2445
2446         if (!cmd) {
2447                 DRM_ERROR("Failed reserving fifo space for commands.\n");
2448                 return -ENOMEM;
2449         }
2450
2451         vmw_apply_relocations(sw_context);
2452         memcpy(cmd, kernel_commands, command_size);
2453         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2454         vmw_resource_relocations_free(&sw_context->res_relocations);
2455         vmw_fifo_commit(dev_priv, command_size);
2456
2457         return 0;
2458 }
2459
2460 /**
2461  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
2462  * the command buffer manager.
2463  *
2464  * @dev_priv: Pointer to a device private structure.
2465  * @header: Opaque handle to the command buffer allocation.
2466  * @command_size: Size of the unpatched command batch.
2467  * @sw_context: Structure holding the relocation lists.
2468  *
2469  * Side effects: If this function returns 0, then the command buffer
2470  * represented by @header will have been modified.
2471  */
2472 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
2473                                      struct vmw_cmdbuf_header *header,
2474                                      u32 command_size,
2475                                      struct vmw_sw_context *sw_context)
2476 {
2477         void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
2478                                        SVGA3D_INVALID_ID, false, header);
2479
2480         vmw_apply_relocations(sw_context);
2481         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2482         vmw_resource_relocations_free(&sw_context->res_relocations);
2483         vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
2484
2485         return 0;
2486 }
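
/*
 * Both submission paths above perform the same patch sequence before handing
 * the batch to the device: apply the buffer-object relocations, then the
 * resource-id relocations, then commit. The only difference is the
 * destination, i.e. fifo space from vmw_fifo_reserve() versus a command
 * buffer reserved through the command buffer manager.
 */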
2487
2488 /**
2489  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
2490  * submission using a command buffer.
2491  *
2492  * @dev_priv: Pointer to a device private structure.
2493  * @user_commands: User-space pointer to the commands to be submitted.
2494  * @command_size: Size of the unpatched command batch.
2495  * @header: Out parameter returning the opaque pointer to the command buffer.
2496  *
2497  * This function checks whether we can use the command buffer manager for
2498  * submission and if so, creates a command buffer of suitable size and
2499  * copies the user data into that buffer.
2500  *
2501  * On successful return, the function returns a pointer to the data in the
2502  * command buffer and *@header is set to non-NULL.
2503  * If command buffers cannot be used, the function returns @kernel_commands
2504  * unchanged; that value may be NULL, and in that case *@header is set to
2505  * NULL as well.
2506  * If an error is encountered, the function will return a pointer error value.
2507  * If the function is interrupted by a signal while sleeping, it will return
2508  * -ERESTARTSYS casted to a pointer error value.
2509  */
2510 void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
2511                          void __user *user_commands,
2512                          void *kernel_commands,
2513                          u32 command_size,
2514                          struct vmw_cmdbuf_header **header)
2515 {
2516         size_t cmdbuf_size;
2517         int ret;
2518
2519         *header = NULL;
2520         if (!dev_priv->cman || kernel_commands)
2521                 return kernel_commands;
2522
2523         if (command_size > SVGA_CB_MAX_SIZE) {
2524                 DRM_ERROR("Command buffer is too large.\n");
2525                 return ERR_PTR(-EINVAL);
2526         }
2527
2528         /* If possible, add a little space for fencing. */
2529         cmdbuf_size = command_size + 512;
2530         cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
2531         kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
2532                                            true, header);
2533         if (IS_ERR(kernel_commands))
2534                 return kernel_commands;
2535
2536         ret = copy_from_user(kernel_commands, user_commands,
2537                              command_size);
2538         if (ret) {
2539                 DRM_ERROR("Failed copying commands.\n");
2540                 vmw_cmdbuf_header_free(*header);
2541                 *header = NULL;
2542                 return ERR_PTR(-EFAULT);
2543         }
2544
2545         return kernel_commands;
2546 }
2547
2548 int vmw_execbuf_process(struct drm_file *file_priv,
2549                         struct vmw_private *dev_priv,
2550                         void __user *user_commands,
2551                         void *kernel_commands,
2552                         uint32_t command_size,
2553                         uint64_t throttle_us,
2554                         uint32_t quirks,
2555                         struct drm_vmw_fence_rep __user *user_fence_rep,
2556                         struct vmw_fence_obj **out_fence)
2557 {
2558         struct vmw_sw_context *sw_context = &dev_priv->ctx;
2559         struct vmw_fence_obj *fence = NULL;
2560         struct vmw_resource *error_resource;
2561         struct list_head resource_list;
2562         struct vmw_cmdbuf_header *header;
2563         struct ww_acquire_ctx ticket;
2564         uint32_t handle;
2565         int ret;
2566
2567         if (throttle_us) {
2568                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2569                                    throttle_us);
2570
2571                 if (ret)
2572                         return ret;
2573         }
2574
2575         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
2576                                              kernel_commands, command_size,
2577                                              &header);
2578         if (IS_ERR(kernel_commands))
2579                 return PTR_ERR(kernel_commands);
2580
2581         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2582         if (ret) {
2583                 ret = -ERESTARTSYS;
2584                 goto out_free_header;
2585         }
2586
2587         sw_context->kernel = false;
2588         if (kernel_commands == NULL) {
2589                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2590                 if (unlikely(ret != 0))
2591                         goto out_unlock;
2592
2594                 ret = copy_from_user(sw_context->cmd_bounce,
2595                                      user_commands, command_size);
2596
2597                 if (unlikely(ret != 0)) {
2598                         ret = -EFAULT;
2599                         DRM_ERROR("Failed copying commands.\n");
2600                         goto out_unlock;
2601                 }
2602                 kernel_commands = sw_context->cmd_bounce;
2603         } else if (!header)
2604                 sw_context->kernel = true;
2605
2606         sw_context->fp = vmw_fpriv(file_priv);
2607         sw_context->cur_reloc = 0;
2608         sw_context->cur_val_buf = 0;
2609         sw_context->quirks = quirks;
2610         INIT_LIST_HEAD(&sw_context->resource_list);
2611         sw_context->cur_query_bo = dev_priv->pinned_bo;
2612         sw_context->last_query_ctx = NULL;
2613         sw_context->needs_post_query_barrier = false;
2614         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2615         INIT_LIST_HEAD(&sw_context->validate_nodes);
2616         INIT_LIST_HEAD(&sw_context->res_relocations);
2617         if (!sw_context->res_ht_initialized) {
2618                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2619                 if (unlikely(ret != 0))
2620                         goto out_unlock;
2621                 sw_context->res_ht_initialized = true;
2622         }
2623         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2624         INIT_LIST_HEAD(&resource_list);
2625         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2626                                 command_size);
2627         if (unlikely(ret != 0))
2628                 goto out_err_nores;
2629
2630         ret = vmw_resources_reserve(sw_context);
2631         if (unlikely(ret != 0))
2632                 goto out_err_nores;
2633
2634         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2635                                      true, NULL);
2636         if (unlikely(ret != 0))
2637                 goto out_err;
2638
2639         ret = vmw_validate_buffers(dev_priv, sw_context);
2640         if (unlikely(ret != 0))
2641                 goto out_err;
2642
2643         ret = vmw_resources_validate(sw_context);
2644         if (unlikely(ret != 0))
2645                 goto out_err;
2646
2647         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2648         if (unlikely(ret != 0)) {
2649                 ret = -ERESTARTSYS;
2650                 goto out_err;
2651         }
2652
2653         if (dev_priv->has_mob) {
2654                 ret = vmw_rebind_contexts(sw_context);
2655                 if (unlikely(ret != 0))
2656                         goto out_unlock_binding;
2657         }
2658
2659         if (!header) {
2660                 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
2661                                               command_size, sw_context);
2662         } else {
2663                 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
2664                                                 sw_context);
2665                 header = NULL;
2666         }
2667         if (ret)
2668                 goto out_unlock_binding;
2669
2670         vmw_query_bo_switch_commit(dev_priv, sw_context);
2671         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2672                                          &fence,
2673                                          (user_fence_rep) ? &handle : NULL);
2674         /*
2675          * This error is harmless, because if fence submission fails,
2676          * vmw_fifo_send_fence will sync. The error will be propagated to
2677          * user-space in @fence_rep
2678          */
2679
2680         if (ret != 0)
2681                 DRM_ERROR("Fence submission error. Syncing.\n");
2682
2683         vmw_resource_list_unreserve(&sw_context->resource_list, false);
2684         mutex_unlock(&dev_priv->binding_mutex);
2685
2686         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2687                                     (void *) fence);
2688
2689         if (unlikely(dev_priv->pinned_bo != NULL &&
2690                      !dev_priv->query_cid_valid))
2691                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2692
2693         vmw_clear_validations(sw_context);
2694         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2695                                     user_fence_rep, fence, handle);
2696
2697         /* Don't unreference when handing fence out */
2698         if (unlikely(out_fence != NULL)) {
2699                 *out_fence = fence;
2700                 fence = NULL;
2701         } else if (likely(fence != NULL)) {
2702                 vmw_fence_obj_unreference(&fence);
2703         }
2704
2705         list_splice_init(&sw_context->resource_list, &resource_list);
2706         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2707         mutex_unlock(&dev_priv->cmdbuf_mutex);
2708
2709         /*
2710          * Unreference resources outside of the cmdbuf_mutex to
2711          * avoid deadlocks in resource destruction paths.
2712          */
2713         vmw_resource_list_unreference(&resource_list);
2714
2715         return 0;
2716
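/*
 * Error unwind: each label below backs off what was set up before the
 * point of failure, in reverse order.
 */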
2717 out_unlock_binding:
2718         mutex_unlock(&dev_priv->binding_mutex);
2719 out_err:
2720         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2721 out_err_nores:
2722         vmw_resource_list_unreserve(&sw_context->resource_list, true);
2723         vmw_resource_relocations_free(&sw_context->res_relocations);
2724         vmw_free_relocations(sw_context);
2725         vmw_clear_validations(sw_context);
2726         if (unlikely(dev_priv->pinned_bo != NULL &&
2727                      !dev_priv->query_cid_valid))
2728                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2729 out_unlock:
2730         list_splice_init(&sw_context->resource_list, &resource_list);
2731         error_resource = sw_context->error_resource;
2732         sw_context->error_resource = NULL;
2733         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2734         mutex_unlock(&dev_priv->cmdbuf_mutex);
2735
2736         /*
2737          * Unreference resources outside of the cmdbuf_mutex to
2738          * avoid deadlocks in resource destruction paths.
2739          */
2740         vmw_resource_list_unreference(&resource_list);
2741         if (unlikely(error_resource != NULL))
2742                 vmw_resource_unreference(&error_resource);
2743 out_free_header:
2744         if (header)
2745                 vmw_cmdbuf_header_free(header);
2746
2747         return ret;
2748 }
2749
2750 /**
2751  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2752  *
2753  * @dev_priv: The device private structure.
2754  *
2755  * This function is called to idle the fifo and unpin the query buffer
2756  * if the normal way to do this hits an error, which should typically be
2757  * extremely rare.
2758  */
2759 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2760 {
2761         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2762
2763         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2764         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2765         if (dev_priv->dummy_query_bo_pinned) {
2766                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2767                 dev_priv->dummy_query_bo_pinned = false;
2768         }
2769 }
2770
2771
2772 /**
2773  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2774  * query bo.
2775  *
2776  * @dev_priv: The device private structure.
2777  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2778  * _after_ a query barrier that flushes all queries touching the current
2779  * buffer pointed to by @dev_priv->pinned_bo
2780  * buffer pointed to by @dev_priv->pinned_bo.
2781  * This function should be used to unpin the pinned query bo, or
2782  * as a query barrier when we need to make sure that all queries have
2783  * finished before the next fifo command. (For example on hardware
2784  * context destruction, where the hardware may otherwise leak unfinished
2785  * queries.)
2786  *
2787  * This function does not return any failure codes, but makes an attempt
2788  * to unpin safely in case of errors.
2789  *
2790  * The function will synchronize on the previous query barrier, and will
2791  * thus not finish until that barrier has executed.
2792  *
2793  * The @dev_priv->cmdbuf_mutex must be held by the current thread
2794  * before calling this function.
2795  */
2796 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2797                                      struct vmw_fence_obj *fence)
2798 {
2799         int ret = 0;
2800         struct list_head validate_list;
2801         struct ttm_validate_buffer pinned_val, query_val;
2802         struct vmw_fence_obj *lfence = NULL;
2803         struct ww_acquire_ctx ticket;
2804
2805         if (dev_priv->pinned_bo == NULL)
2806                 goto out_unlock;
2807
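        /*
         * Build a local validation list holding the pinned query bo and
         * the dummy query bo so that both can be reserved and fenced
         * together.
         */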
2808         INIT_LIST_HEAD(&validate_list);
2809
2810         pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
2811         pinned_val.shared = false;
2812         list_add_tail(&pinned_val.head, &validate_list);
2813
2814         query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
2815         query_val.shared = false;
2816         list_add_tail(&query_val.head, &validate_list);
2817
2818         ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
2819                                      false, NULL);
2820         if (unlikely(ret != 0)) {
2821                 vmw_execbuf_unpin_panic(dev_priv);
2822                 goto out_no_reserve;
2823         }
2824
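        /*
         * If a query context is still marked valid, emit a dummy query
         * as a barrier that flushes outstanding queries before unpinning.
         */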
2825         if (dev_priv->query_cid_valid) {
2826                 BUG_ON(fence != NULL);
2827                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2828                 if (unlikely(ret != 0)) {
2829                         vmw_execbuf_unpin_panic(dev_priv);
2830                         goto out_no_emit;
2831                 }
2832                 dev_priv->query_cid_valid = false;
2833         }
2834
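        /*
         * Unpin both buffers, then fence the validation list; if no fence
         * was supplied, create a local one and drop it afterwards.
         */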
2835         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2836         if (dev_priv->dummy_query_bo_pinned) {
2837                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2838                 dev_priv->dummy_query_bo_pinned = false;
2839         }
2840         if (fence == NULL) {
2841                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2842                                                   NULL);
2843                 fence = lfence;
2844         }
2845         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2846         if (lfence != NULL)
2847                 vmw_fence_obj_unreference(&lfence);
2848
2849         ttm_bo_unref(&query_val.bo);
2850         ttm_bo_unref(&pinned_val.bo);
2851         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
2852         DRM_INFO("Dummy query bo pin count: %d\n",
2853                  dev_priv->dummy_query_bo->pin_count);
2854
2855 out_unlock:
2856         return;
2857
2858 out_no_emit:
2859         ttm_eu_backoff_reservation(&ticket, &validate_list);
2860 out_no_reserve:
2861         ttm_bo_unref(&query_val.bo);
2862         ttm_bo_unref(&pinned_val.bo);
2863         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
2864 }
2865
2866 /**
2867  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2868  * query bo.
2869  *
2870  * @dev_priv: The device private structure.
2871  *
2872  * This function should be used to unpin the pinned query bo, or
2873  * as a query barrier when we need to make sure that all queries have
2874  * finished before the next fifo command. (For example on hardware
2875  * context destruction, where the hardware may otherwise leak unfinished
2876  * queries.)
2877  *
2878  * This function does not return any failure codes, but makes an attempt
2879  * to unpin safely in case of errors.
2880  *
2881  * The function will synchronize on the previous query barrier, and will
2882  * thus not finish until that barrier has executed.
2883  */
2884 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2885 {
2886         mutex_lock(&dev_priv->cmdbuf_mutex);
2887         if (dev_priv->query_cid_valid)
2888                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2889         mutex_unlock(&dev_priv->cmdbuf_mutex);
2890 }
2891
2892
2893 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2894                       struct drm_file *file_priv)
2895 {
2896         struct vmw_private *dev_priv = vmw_priv(dev);
2897         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2898         int ret;
2899
2900         /*
2901          * This will allow us to extend the ioctl argument while
2902          * maintaining backwards compatibility:
2903          * We take different code paths depending on the value of
2904          * arg->version.
2905          */
2906
2907         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2908                 DRM_ERROR("Incorrect execbuf version.\n");
2909                 DRM_ERROR("You're running outdated experimental "
2910                           "vmwgfx user-space drivers.\n");
2911                 return -EINVAL;
2912         }
2913
2914         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2915         if (unlikely(ret != 0))
2916                 return ret;
2917
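        /*
         * Process the command stream with the reservation semaphore held
         * for reading; it is released again once processing completes.
         */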
2918         ret = vmw_execbuf_process(file_priv, dev_priv,
2919                                   (void __user *)(unsigned long)arg->commands,
2920                                   NULL, arg->command_size, arg->throttle_us,
2921                                   0,
2922                                   (void __user *)(unsigned long)arg->fence_rep,
2923                                   NULL);
2924         ttm_read_unlock(&dev_priv->reservation_sem);
2925         if (unlikely(ret != 0))
2926                 return ret;
2927
2928         vmw_kms_cursor_post_execbuf(dev_priv);
2929
2930         return 0;
2931 }