/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

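/*
 * Illustrative sketch (not part of the driver): VMW_CMD_DEF uses a C99
 * designated initializer so that each entry lands at the command id's
 * offset from SVGA_3D_CMD_BASE. A hypothetical table with a single entry
 * would expand roughly as follows:
 *
 *	static const struct vmw_cmd_entry example_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *			    true, false, false),
 *	};
 *
 *	// ...which is equivalent to:
 *	// [SVGA_3D_CMD_SURFACE_DMA - SVGA_3D_CMD_BASE] =
 *	//	{&vmw_cmd_dma, true, false, false}
 */
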
/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff)
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission
 * sequence.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_binding *entry;
	int ret = 0;
	struct vmw_resource *res;

	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		res = vmw_resource_reference_unless_doomed(entry->bi.res);
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

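/*
 * Illustrative sketch (not part of the driver): a relocation records the
 * 32-bit word offset of a resource id in the command buffer. Applying it
 * simply rewrites that word once the final device id is known, e.g.:
 *
 *	uint32_t cb[16];		// hypothetical copy of a command buffer
 *	struct vmw_resource_relocation rel = {
 *		.res = res,		// assumed to be a validated resource
 *		.offset = 2,		// id lives in the third 32-bit word
 *	};
 *	cb[rel.offset] = rel.res->id;	// what vmw_resource_relocations_apply() does
 */
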
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on success.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

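/*
 * Illustrative sketch (not part of the driver): the hash table keyed on the
 * buffer object pointer makes repeated additions of the same buffer cheap.
 * A second call with the same buffer returns the same node index rather than
 * consuming another of the VMWGFX_MAX_VALIDATIONS slots:
 *
 *	uint32_t first, second;
 *	vmw_bo_to_validate_list(sw_context, vbo, false, &first);
 *	vmw_bo_to_validate_list(sw_context, vbo, false, &second);
 *	// first == second; only one ttm_validate_buffer was added.
 */
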
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (res_type == vmw_res_context && dev_priv->has_mob &&
	    node->first_usage) {

		/*
		 * Put contexts first on the list to be able to exit
		 * list traversal for contexts early.
		 */
		list_del(&node->head);
		list_add(&node->head, &sw_context->resource_list);

		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
		if (unlikely(ret != 0))
			return ret;
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);

	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_context_rebind_all(val->res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (!(sw_context->quirks & VMW_QUIRK_SRC_SID_OK)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->body.src.sid, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	if (sw_context->quirks & VMW_QUIRK_DST_SID_OK)
		return 0;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

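/*
 * Illustrative sketch (not part of the driver): a MOB relocation leaves
 * @location NULL and records @mob_loc instead, so vmw_apply_relocations()
 * later writes the MOB's page table address (bo->mem.start) rather than a
 * GMR id/offset pair:
 *
 *	reloc->mob_loc = id;		// MOB case, patched with bo->mem.start
 *	reloc->location = NULL;
 *	...
 *	reloc->location = ptr;		// guest-pointer case (see below),
 *					// patched with gmrId/offset
 */
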
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	if (sw_context->quirks & VMW_QUIRK_DST_SID_OK)
		goto out_no_surface;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

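/*
 * Illustrative sketch (not part of the driver): the bounds check above clamps
 * the DMA so it cannot read or write past the guest buffer object. With a
 * hypothetical 3-page buffer (12288 bytes) and a guest pointer offset of
 * 4096, the suffix may address at most 8192 bytes:
 *
 *	bo_size = 3 * PAGE_SIZE;		// 12288
 *	bo_size -= cmd->dma.guest.ptr.offset;	// 12288 - 4096 = 8192
 *	if (suffix->maximumOffset > bo_size)
 *		suffix->maximumOffset = bo_size;	// clamped to 8192
 */
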
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

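/*
 * Illustrative sketch (not part of the driver): callers pass the command's
 * resource handle and MOB handle; the new backup buffer is only recorded on
 * the validation node here and actually takes effect at unreserve time, e.g.
 * for SVGA_3D_CMD_BIND_GB_SURFACE:
 *
 *	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
 *				     user_surface_converter,
 *				     &cmd->body.sid, &cmd->body.mobid, 0);
 */
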
/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	if (sw_context->quirks & VMW_QUIRK_SRC_SID_OK)
		return 0;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
				       cmd->body.shid,
				       cmd->body.type,
				       &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo bi;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_compat_shader_lookup
			(vmw_context_res_man(ctx_node->res),
			 cmd->body.shid,
			 cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    vmw_res_shader,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	bi.ctx = ctx_node->res;
	bi.res = res_node ? res_node->res : NULL;
	bi.bt = vmw_ctx_binding_shader;
	bi.i1.shader_type = cmd->body.type;
	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

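/*
 * Illustrative sketch (not part of the driver): non-3D (2D) FIFO commands
 * have no SVGA3dCmdHeader; a packet is a single 32-bit command id followed
 * by a fixed-size body, which is why *size is computed per command id above:
 *
 *	// hypothetical layout of an SVGA_CMD_UPDATE packet
 *	struct {
 *		uint32_t cmd_id;	// SVGA_CMD_UPDATE
 *		SVGAFifoCmdUpdate body;	// x, y, width, height
 *	} packet;
 *	// *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate)
 */
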
1900 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1901 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1902 false, false, false),
1903 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1904 false, false, false),
1905 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1906 true, false, false),
1907 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1908 true, false, false),
1909 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1910 true, false, false),
1911 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1912 false, false, false),
1913 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1914 false, false, false),
1915 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1916 true, false, false),
1917 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1918 true, false, false),
1919 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1920 true, false, false),
1921 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1922 &vmw_cmd_set_render_target_check, true, false, false),
1923 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1924 true, false, false),
1925 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1926 true, false, false),
1927 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1928 true, false, false),
1929 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1930 true, false, false),
1931 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1932 true, false, false),
1933 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1934 true, false, false),
1935 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1936 true, false, false),
1937 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1938 false, false, false),
1939 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1940 true, false, false),
1941 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1942 true, false, false),
1943 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1944 true, false, false),
1945 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1946 true, false, false),
1947 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1948 true, false, false),
1949 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1950 true, false, false),
1951 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1952 true, false, false),
1953 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1954 true, false, false),
1955 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1956 true, false, false),
1957 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1958 true, false, false),
1959 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1960 &vmw_cmd_blt_surf_screen_check, false, false, false),
1961 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1962 false, false, false),
1963 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1964 false, false, false),
1965 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1966 false, false, false),
1967 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1968 false, false, false),
1969 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1970 false, false, false),
1971 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1972 false, false, false),
1973 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1974 false, false, false),
1975 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1976 false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
};

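/*
 * Illustrative sketch (not part of the driver): how vmw_cmd_check() below
 * maps a 3D command id onto this table.  The VMW_CMD_DEF() initializer
 * places each entry at index (cmd - SVGA_3D_CMD_BASE), so lookup is a
 * single subtraction followed by a bounds check, roughly:
 *
 *	cmd_id = le32_to_cpu(header->id) - SVGA_3D_CMD_BASE;
 *	if (cmd_id < SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE &&
 *	    vmw_cmd_entries[cmd_id].func)
 *		ret = vmw_cmd_entries[cmd_id].func(dev_priv, sw_context,
 *						   header);
 *
 * Whether the entry is honoured also depends on its user_allow,
 * gb_disable and gb_enable flags, checked against the caller and the
 * device capabilities.
 */
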
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands. */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;

out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

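/*
 * Illustrative sketch (not part of the driver): the batch layout that
 * vmw_cmd_check_all() walks.  Apart from legacy non-3D commands, every
 * command starts with an SVGA3dCmdHeader, and the verifier advances by
 * header->size + sizeof(header) until the submitted size is consumed
 * exactly:
 *
 *	| SVGA3dCmdHeader | body ...      | SVGA3dCmdHeader | body ... |
 *	  id, size          (size bytes)
 *
 * If a command overruns the batch, or the per-command sizes do not add
 * up to the submitted size, the whole batch is rejected.
 */
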
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */
	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		if (unlikely(val->staged_bindings))
			kfree(val->staged_bindings);
		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

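/*
 * Illustrative sketch (not part of the driver): the bounce buffer grows
 * geometrically, by roughly 1.5x per step, rounded up to a page.
 * Assuming a 32 KiB initial size (the value of VMWGFX_CMD_BOUNCE_INIT_SIZE
 * is an assumption here) and 4 KiB pages, a 100 KiB batch is reached as:
 *
 *	32 KiB -> PAGE_ALIGN(32K + 16K) = 48 KiB
 *	48 KiB -> PAGE_ALIGN(48K + 24K) = 72 KiB
 *	72 KiB -> PAGE_ALIGN(72K + 36K) = 108 KiB >= 100 KiB, done.
 *
 * The old buffer is then vfree()d and a fresh one vmalloc()ed; contents
 * are not preserved, since the caller copies the batch in afterwards.
 */
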
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

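/*
 * Illustrative usage sketch (not part of the driver): a caller that only
 * needs a kernel-side fence, without a user-space handle, passes NULL for
 * both @file_priv and @p_handle and must tolerate a NULL fence:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	if (fence) {
 *		... fence buffer objects or wait as needed ...
 *		vmw_fence_obj_unreference(&fence);
 *	}
 *
 * A NULL fence is safe here because fence creation failure implies the
 * fifo has already been synced.
 */
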
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

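/*
 * Illustrative sketch (not part of the driver): the user-space convention
 * this function relies on.  Before submitting, user-space would typically
 * preload the error member so a failed copy_to_user() is still detectable:
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;	// sentinel value
 *	... point the execbuf argument's fence_rep at &rep and submit ...
 *	if (rep.error != 0)
 *		// Either fence creation failed or the kernel could not
 *		// write the reply; no fence handle may be assumed.
 *
 * On the kernel side, a failed copy additionally waits for the fence and
 * drops the handle, so user-space never inherits a fence it cannot signal.
 */
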
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd = vmw_fifo_reserve(dev_priv, command_size);

	if (cmd == NULL) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       SVGA3D_INVALID_ID, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to an already copied-in kernel command batch,
 * or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
			 void __user *user_commands,
			 void *kernel_commands,
			 u32 command_size,
			 struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

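/*
 * Illustrative sketch (not part of the driver): the three outcomes a
 * caller of vmw_execbuf_cmdbuf() has to distinguish, mirroring what
 * vmw_execbuf_process() below does:
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);  // error, nothing to free
 *	if (header)
 *		// Commands live in a command buffer; submit through
 *		// vmw_execbuf_submit_cmdbuf() and free @header on error paths.
 *	else
 *		// Command buffers unavailable; fall back to the bounce
 *		// buffer / fifo path.
 */
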
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t quirks,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->quirks = quirks;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	if (ret)
		goto out_unlock_binding;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	mutex_unlock(&dev_priv->binding_mutex);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

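/*
 * Illustrative usage sketch (not part of the driver): an in-kernel caller
 * that already has a kernel-space command batch (cmds/size are placeholder
 * names) and wants the resulting fence handed back, with no throttling, no
 * quirks and no user-space fence reply:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *	int ret;
 *
 *	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmds, size,
 *				  0, 0, NULL, &fence);
 *	if (ret == 0 && fence) {
 *		... wait on or pass along the fence ...
 *		vmw_fence_obj_unreference(&fence);
 *	}
 *
 * With @out_fence non-NULL, the fence reference is handed to the caller
 * instead of being dropped internally.
 */
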
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

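/*
 * Illustrative usage sketch (not part of the driver): using the locked
 * wrapper as a query barrier before destroying a hardware context, so no
 * in-flight query can still reference the pinned query buffer:
 *
 *	vmw_execbuf_release_pinned_bo(dev_priv);
 *	... emit the context destroy command ...
 *
 * The call synchronizes on the previous query barrier and is effectively
 * a no-op when no query buffer is currently pinned.
 */
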
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */
	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  0,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
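
/*
 * Illustrative sketch (not part of the driver): the shape of a user-space
 * submission through this ioctl.  Field names follow the
 * struct drm_vmw_execbuf_arg members referenced above; the ioctl request
 * macro and the fd/cmd_buffer/cmd_size names are assumptions for the sake
 * of the example:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t)cmd_buffer,
 *		.command_size = cmd_size,
 *		.throttle_us = 0,
 *		.fence_rep = (uintptr_t)&rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg) == 0 &&
 *	    rep.error == 0)
 *		// rep.handle now names a fence object to wait on.
 *
 * Any other version value is rejected with -EINVAL, which is why
 * user-space libraries pin this to the version they were built against.
 */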