drm/vmwgfx: Initial DX support
author Thomas Hellstrom <thellstrom@vmware.com>
Mon, 10 Aug 2015 17:39:35 +0000 (10:39 -0700)
committer Thomas Hellstrom <thellstrom@vmware.com>
Wed, 12 Aug 2015 17:06:32 +0000 (10:06 -0700)
Initial DX support.
Co-authored with Sinclair Yeh, Charmaine Lee and Jakob Bornecrantz.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Charmaine Lee <charmainel@vmware.com>
22 files changed:
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/vmwgfx_binding.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_binding.h [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_so.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_so.h [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
include/uapi/drm/vmwgfx_drm.h

index 484093986d5afeb7a3c380443ae0108fb596b765..d281575bbe11ae090460d75fb371c8d49da1a36d 100644 (file)
@@ -8,5 +8,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
            vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
            vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+           vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644 (file)
index 0000000..9c42e96
--- /dev/null
@@ -0,0 +1,1294 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that VMware guest-backed
+ * contexts can be swapped out to their backing mobs by the device at any
+ * time, and swapped in again at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It's this outside-of-drawcall validation, which can happen at any time,
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out the context, we can't swap it in again to kill bindings
+ * because of backing mob reservation lockdep violations. Therefore, as
+ * part of context swapout, we also kill all bindings of the context, so
+ * that they are already killed if a resource to which a binding points
+ * needs to be swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple
+ * contexts. Therefore we can't easily protect this data with a per-context
+ * mutex (unless we use deadlock-safe WW mutexes), so we use a global
+ * binding_mutex to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */
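For orientation, here is a minimal usage sketch, not part of the patch itself, of how a resource eviction path is expected to interact with the binding manager described above. The helper name is hypothetical; the binding_head list and the global binding_mutex follow the members this code relies on.

static void example_evict_resource(struct vmw_private *dev_priv,
				   struct vmw_resource *res)
{
	/* All binding manager data is protected by the global mutex. */
	mutex_lock(&dev_priv->binding_mutex);

	/* Emit scrub commands and drop every binding pointing at @res. */
	vmw_binding_res_list_kill(&res->binding_head);
	mutex_unlock(&dev_priv->binding_mutex);

	/* The resource's backing mob can now safely be swapped out. */
}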
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_reg.h"
+
+#define VMW_BINDING_RT_BIT     0
+#define VMW_BINDING_PS_BIT     1
+#define VMW_BINDING_SO_BIT     2
+#define VMW_BINDING_VB_BIT     3
+#define VMW_BINDING_NUM_BITS   4
+
+#define VMW_BINDING_PS_SR_BIT  0
+
+/**
+ * struct vmw_ctx_binding_state - per context binding state
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: linked list of individual active bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture unit bindings.
+ * @ds_view: Depth-stencil view binding.
+ * @so_targets: StreamOutput target bindings.
+ * @vertex_buffers: Vertex buffer bindings.
+ * @index_buffer: Index buffer binding.
+ * @per_shader: Per shader-type bindings.
+ * @dirty: Bitmap tracking per binding-type changes that have not yet
+ * been emitted to the device.
+ * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
+ * have not yet been emitted to the device.
+ * @bind_cmd_buffer: Scratch space used to construct binding commands.
+ * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
+ * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
+ * device binding slot of the first command data entry in @bind_cmd_buffer.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+       struct vmw_private *dev_priv;
+       struct list_head list;
+       struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
+       struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+       struct vmw_ctx_bindinfo_view ds_view;
+       struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+       struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+       struct vmw_ctx_bindinfo_ib index_buffer;
+       struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+
+       unsigned long dirty;
+       DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
+
+       u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
+       u32 bind_cmd_count;
+       u32 bind_first_slot;
+};
+
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+                                          bool rebind);
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
+                                      bool rebind);
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_binding_build_asserts(void) __attribute__ ((unused));
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
+/**
+ * struct vmw_binding_info - Per binding type information for the binding
+ * manager
+ *
+ * @size: The size of the binding metadata struct derived from
+ * struct vmw_ctx_bindinfo.
+ * @offsets: array[shader_slot] of offsets to the array[slot]
+ * of bindinfo structs for the binding type.
+ * @scrub_func: Pointer to the scrub function for this binding type.
+ *
+ * Holds static information to help optimize the binding manager and avoid
+ * an excessive amount of switch statements.
+ */
+struct vmw_binding_info {
+       size_t size;
+       const size_t *offsets;
+       vmw_scrub_func scrub_func;
+};
+
+/*
+ * A number of static variables that help determine the scrub func and the
+ * location of the struct vmw_ctx_bindinfo slots for each binding type.
+ */
+static const size_t vmw_binding_shader_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
+       offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
+       offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+};
+static const size_t vmw_binding_rt_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, render_targets),
+};
+static const size_t vmw_binding_tex_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, texture_units),
+};
+static const size_t vmw_binding_cb_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
+       offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
+       offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+};
+static const size_t vmw_binding_dx_ds_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, ds_view),
+};
+static const size_t vmw_binding_sr_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
+       offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
+       offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+};
+static const size_t vmw_binding_so_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, so_targets),
+};
+static const size_t vmw_binding_vb_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, vertex_buffers),
+};
+static const size_t vmw_binding_ib_offsets[] = {
+       offsetof(struct vmw_ctx_binding_state, index_buffer),
+};
+
+static const struct vmw_binding_info vmw_binding_infos[] = {
+       [vmw_ctx_binding_shader] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_shader),
+               .offsets = vmw_binding_shader_offsets,
+               .scrub_func = vmw_binding_scrub_shader},
+       [vmw_ctx_binding_rt] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_view),
+               .offsets = vmw_binding_rt_offsets,
+               .scrub_func = vmw_binding_scrub_render_target},
+       [vmw_ctx_binding_tex] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_tex),
+               .offsets = vmw_binding_tex_offsets,
+               .scrub_func = vmw_binding_scrub_texture},
+       [vmw_ctx_binding_cb] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_cb),
+               .offsets = vmw_binding_cb_offsets,
+               .scrub_func = vmw_binding_scrub_cb},
+       [vmw_ctx_binding_dx_shader] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_shader),
+               .offsets = vmw_binding_shader_offsets,
+               .scrub_func = vmw_binding_scrub_dx_shader},
+       [vmw_ctx_binding_dx_rt] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_view),
+               .offsets = vmw_binding_rt_offsets,
+               .scrub_func = vmw_binding_scrub_dx_rt},
+       [vmw_ctx_binding_sr] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_view),
+               .offsets = vmw_binding_sr_offsets,
+               .scrub_func = vmw_binding_scrub_sr},
+       [vmw_ctx_binding_ds] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_view),
+               .offsets = vmw_binding_dx_ds_offsets,
+               .scrub_func = vmw_binding_scrub_dx_rt},
+       [vmw_ctx_binding_so] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_so),
+               .offsets = vmw_binding_so_offsets,
+               .scrub_func = vmw_binding_scrub_so},
+       [vmw_ctx_binding_vb] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_vb),
+               .offsets = vmw_binding_vb_offsets,
+               .scrub_func = vmw_binding_scrub_vb},
+       [vmw_ctx_binding_ib] = {
+               .size = sizeof(struct vmw_ctx_bindinfo_ib),
+               .offsets = vmw_binding_ib_offsets,
+               .scrub_func = vmw_binding_scrub_ib},
+};
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+       if (list_empty(&cbs->list))
+               return NULL;
+
+       return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+                               ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state that holds the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+               enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+       const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+       size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+       return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
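As a stand-alone illustration of this offset-table lookup, the sketch below (simplified types, compilable outside the kernel) locates a binding slot from a per-type offset array and element size, which is exactly the arithmetic vmw_binding_loc() performs; all names here are invented for the example.

#include <stdio.h>
#include <stddef.h>

struct bindinfo { int ctx; };
struct bindinfo_view { struct bindinfo bi; unsigned slot; };

struct binding_state {
	struct bindinfo_view render_targets[8];
	struct bindinfo_view shader_res[3][4];	/* [shader_slot][slot] */
};

/* array[shader_slot] of offsets to the per-slot binding arrays. */
static const size_t sr_offsets[] = {
	offsetof(struct binding_state, shader_res[0]),
	offsetof(struct binding_state, shader_res[1]),
	offsetof(struct binding_state, shader_res[2]),
};

static struct bindinfo *binding_loc(struct binding_state *cbs,
				    const size_t *offsets, size_t size,
				    unsigned shader_slot, unsigned slot)
{
	return (struct bindinfo *)((unsigned char *)cbs +
				   offsets[shader_slot] + size * slot);
}

int main(void)
{
	struct binding_state cbs = {0};
	struct bindinfo *bi = binding_loc(&cbs, sr_offsets,
					  sizeof(struct bindinfo_view), 1, 2);

	printf("located %p, expected %p\n",
	       (void *)bi, (void *)&cbs.shader_res[1][2]);
	return 0;
}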
+
+/**
+ * vmw_binding_drop - Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+       list_del(&bi->ctx_list);
+       if (!list_empty(&bi->res_list))
+               list_del(&bi->res_list);
+       bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add - Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding, if applicable. Otherwise 0.
+ * @slot: The slot of the binding.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+                   const struct vmw_ctx_bindinfo *bi,
+                   u32 shader_slot, u32 slot)
+{
+       struct vmw_ctx_bindinfo *loc =
+               vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+       const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+       if (loc->ctx != NULL)
+               vmw_binding_drop(loc);
+
+       memcpy(loc, bi, b->size);
+       loc->scrubbed = false;
+       list_add(&loc->ctx_list, &cbs->list);
+       INIT_LIST_HEAD(&loc->res_list);
+}
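A hypothetical caller, for illustration only: command-stream validation code would fill a stack-local bindinfo and let vmw_binding_add() copy it into the per-context slot storage, roughly like this.

static void example_track_rt(struct vmw_ctx_binding_state *cbs,
			     struct vmw_resource *ctx,
			     struct vmw_resource *view, u32 rt_slot)
{
	struct vmw_ctx_bindinfo_view binding;

	binding.bi.ctx = ctx;
	binding.bi.res = view;
	binding.bi.bt = vmw_ctx_binding_dx_rt;
	binding.shader_slot = 0;	/* Not used for render targets. */
	binding.slot = rt_slot;

	vmw_binding_add(cbs, &binding.bi, 0, rt_slot);
}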
+
+/**
+ * vmw_binding_transfer - Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Pointer to the staged context binding state tracker the binding
+ * is transferred from.
+ * @bi: Information about the binding to transfer.
+ *
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+                                const struct vmw_ctx_binding_state *from,
+                                const struct vmw_ctx_bindinfo *bi)
+{
+       size_t offset = (unsigned long)bi - (unsigned long)from;
+       struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+               ((unsigned long) cbs + offset);
+
+       if (loc->ctx != NULL) {
+               WARN_ON(bi->scrubbed);
+
+               vmw_binding_drop(loc);
+       }
+
+       if (bi->res != NULL) {
+               memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+               list_add_tail(&loc->ctx_list, &cbs->list);
+               list_add_tail(&loc->res_list, &loc->res->binding_head);
+       }
+}
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_ctx_bindinfo *entry, *next;
+
+       vmw_binding_state_scrub(cbs);
+       list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+               vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_ctx_bindinfo *entry;
+
+       list_for_each_entry(entry, &cbs->list, ctx_list) {
+               if (!entry->scrubbed) {
+                       (void) vmw_binding_infos[entry->bt].scrub_func
+                               (entry, false);
+                       entry->scrubbed = true;
+               }
+       }
+
+       (void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+       struct vmw_ctx_bindinfo *entry, *next;
+
+       vmw_binding_res_list_scrub(head);
+       list_for_each_entry_safe(entry, next, head, res_list)
+               vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+       struct vmw_ctx_bindinfo *entry;
+
+       list_for_each_entry(entry, head, res_list) {
+               if (!entry->scrubbed) {
+                       (void) vmw_binding_infos[entry->bt].scrub_func
+                               (entry, false);
+                       entry->scrubbed = true;
+               }
+       }
+
+       list_for_each_entry(entry, head, res_list) {
+               struct vmw_ctx_binding_state *cbs =
+                       vmw_context_binding_state(entry->ctx);
+
+               (void) vmw_binding_emit_dirty(cbs);
+       }
+}
+
+
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Pointer to the context binding state tracker to commit the staged
+ * binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+                             struct vmw_ctx_binding_state *from)
+{
+       struct vmw_ctx_bindinfo *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+               vmw_binding_transfer(to, from, entry);
+               vmw_binding_drop(entry);
+       }
+}
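Sketched in context, and assuming a hypothetical submit helper, the intended flow is: stage bindings during validation, submit the command stream, then fold the staged state into the context's persistent tracker.

static void example_submit(struct vmw_ctx_binding_state *ctx_cbs,
			   struct vmw_ctx_binding_state *staged)
{
	/* ... validate and submit the command stream here ... */

	vmw_binding_state_commit(ctx_cbs, staged);
	/* @staged is now empty and may be reused for the next execbuf. */
}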
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_ctx_bindinfo *entry;
+       int ret;
+
+       list_for_each_entry(entry, &cbs->list, ctx_list) {
+               if (likely(!entry->scrubbed))
+                       continue;
+
+               if ((entry->res == NULL || entry->res->id ==
+                           SVGA3D_INVALID_ID))
+                       continue;
+
+               ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               entry->scrubbed = false;
+       }
+
+       return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_bindinfo_shader *binding =
+               container_of(bi, typeof(*binding), bi);
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetShader body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = bi->ctx->id;
+       cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+       cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+                                          bool rebind)
+{
+       struct vmw_ctx_bindinfo_view *binding =
+               container_of(bi, typeof(*binding), bi);
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetRenderTarget body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for render target "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = bi->ctx->id;
+       cmd->body.type = binding->slot;
+       cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+       cmd->body.target.face = 0;
+       cmd->body.target.mipmap = 0;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
+                                    bool rebind)
+{
+       struct vmw_ctx_bindinfo_tex *binding =
+               container_of(bi, typeof(*binding), bi);
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               struct {
+                       SVGA3dCmdSetTextureState c;
+                       SVGA3dTextureState s1;
+               } body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for texture "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.c.cid = bi->ctx->id;
+       cmd->body.s1.stage = binding->texture_stage;
+       cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+       cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_shader - scrub a DX shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_bindinfo_shader *binding =
+               container_of(bi, typeof(*binding), bi);
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetShader body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for DX shader "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+       cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+       cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_bindinfo_cb *binding =
+               container_of(bi, typeof(*binding), bi);
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetSingleConstantBuffer body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for DX constant "
+                         "buffer unbinding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.slot = binding->slot;
+       cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+       if (rebind) {
+               cmd->body.offsetInBytes = binding->offset;
+               cmd->body.sizeInBytes = binding->size;
+               cmd->body.sid = bi->res->id;
+       } else {
+               cmd->body.offsetInBytes = 0;
+               cmd->body.sizeInBytes = 0;
+               cmd->body.sid = SVGA3D_INVALID_ID;
+       }
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_collect_view_ids - Build view id data for a view binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of view id data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
+                                const struct vmw_ctx_bindinfo *bi,
+                                u32 max_num)
+{
+       const struct vmw_ctx_bindinfo_view *biv =
+               container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+       unsigned long i;
+
+       cbs->bind_cmd_count = 0;
+       cbs->bind_first_slot = 0;
+
+       for (i = 0; i < max_num; ++i, ++biv) {
+               if (!biv->bi.ctx)
+                       break;
+
+               cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+                       ((biv->bi.scrubbed) ?
+                        SVGA3D_INVALID_ID : biv->bi.res->id);
+       }
+}
+
+/**
+ * vmw_collect_dirty_view_ids - Build view id data for a view binding command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of view id data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
+                                      const struct vmw_ctx_bindinfo *bi,
+                                      unsigned long *dirty,
+                                      u32 max_num)
+{
+       const struct vmw_ctx_bindinfo_view *biv =
+               container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+       unsigned long i, next_bit;
+
+       cbs->bind_cmd_count = 0;
+       i = find_first_bit(dirty, max_num);
+       next_bit = i;
+       cbs->bind_first_slot = i;
+
+       biv += i;
+       for (; i < max_num; ++i, ++biv) {
+               cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+                       ((!biv->bi.ctx || biv->bi.scrubbed) ?
+                        SVGA3D_INVALID_ID : biv->bi.res->id);
+
+               if (next_bit == i) {
+                       next_bit = find_next_bit(dirty, max_num, i + 1);
+                       if (next_bit >= max_num)
+                               break;
+               }
+       }
+}
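Note the contiguous-run semantics here: the emitted range stretches from the first dirty slot through the last dirty slot, including clean slots in between, so one device command can cover a sparse dirty set. A small user-space model of that logic (toy types and names, not driver code):

#include <stdio.h>

#define MAX_SLOTS 8

static unsigned collect_dirty(const unsigned *ids, unsigned dirty,
			      unsigned *out, unsigned *first_slot)
{
	unsigned i, first, last = 0, count = 0;

	if (!dirty)
		return 0;

	for (i = 0; i < MAX_SLOTS; ++i)
		if (dirty & (1u << i))
			last = i;
	for (first = 0; !(dirty & (1u << first)); ++first)
		;
	*first_slot = first;

	/* Emit every slot from first through last, dirty or not. */
	for (i = first; i <= last; ++i)
		out[count++] = ids[i];

	return count;
}

int main(void)
{
	unsigned ids[MAX_SLOTS] = {10, 11, 12, 13, 14, 15, 16, 17};
	unsigned out[MAX_SLOTS], first, n;

	/* Slots 2 and 5 dirty: emits slots 2..5, i.e. 4 entries. */
	n = collect_dirty(ids, (1u << 2) | (1u << 5), out, &first);
	printf("first slot %u, %u entries\n", first, n);
	return 0;
}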
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot of the bindings to emit.
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+                          int shader_slot)
+{
+       const struct vmw_ctx_bindinfo *loc =
+               &cbs->per_shader[shader_slot].shader_res[0].bi;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetShaderResources body;
+       } *cmd;
+       size_t cmd_size, view_id_size;
+       const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+       vmw_collect_dirty_view_ids(cbs, loc,
+                                  cbs->per_shader[shader_slot].dirty_sr,
+                                  SVGA3D_DX_MAX_SRVIEWS);
+       if (cbs->bind_cmd_count == 0)
+               return 0;
+
+       view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+       cmd_size = sizeof(*cmd) + view_id_size;
+       cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for DX shader"
+                         " resource binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+       cmd->header.size = sizeof(cmd->body) + view_id_size;
+       cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+       cmd->body.startView = cbs->bind_first_slot;
+
+       memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+       vmw_fifo_commit(ctx->dev_priv, cmd_size);
+       bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+                    cbs->bind_first_slot, cbs->bind_cmd_count);
+
+       return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+       const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetRenderTargets body;
+       } *cmd;
+       size_t cmd_size, view_id_size;
+       const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+       vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+       view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+       cmd_size = sizeof(*cmd) + view_id_size;
+       cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for DX render-target"
+                         " binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+       cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+       if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+               cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+       else
+               cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+       memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+       vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+       return 0;
+}
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+                                  const struct vmw_ctx_bindinfo *bi,
+                                  u32 max_num)
+{
+       const struct vmw_ctx_bindinfo_so *biso =
+               container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+       unsigned long i;
+       SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+       cbs->bind_cmd_count = 0;
+       cbs->bind_first_slot = 0;
+
+       for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+                   ++cbs->bind_cmd_count) {
+               if (!biso->bi.ctx)
+                       break;
+
+               if (!biso->bi.scrubbed) {
+                       so_buffer->sid = biso->bi.res->id;
+                       so_buffer->offset = biso->offset;
+                       so_buffer->sizeInBytes = biso->size;
+               } else {
+                       so_buffer->sid = SVGA3D_INVALID_ID;
+                       so_buffer->offset = 0;
+                       so_buffer->sizeInBytes = 0;
+               }
+       }
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+       const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetSOTargets body;
+       } *cmd;
+       size_t cmd_size, so_target_size;
+       const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+       vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+       if (cbs->bind_cmd_count == 0)
+               return 0;
+
+       so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+       cmd_size = sizeof(*cmd) + so_target_size;
+       cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for DX SO target"
+                         " binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+       cmd->header.size = sizeof(cmd->body) + so_target_size;
+       memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+       vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+       return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+       u32 i;
+       int ret = 0;
+
+       for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+               if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+                       continue;
+
+               ret = vmw_emit_set_sr(cbs, i);
+               if (ret)
+                       break;
+
+               __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+       }
+
+       return ret;
+}
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+                                 const struct vmw_ctx_bindinfo *bi,
+                                 unsigned long *dirty,
+                                 u32 max_num)
+{
+       const struct vmw_ctx_bindinfo_vb *biv =
+               container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+       unsigned long i, next_bit;
+       SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
+
+       cbs->bind_cmd_count = 0;
+       i = find_first_bit(dirty, max_num);
+       next_bit = i;
+       cbs->bind_first_slot = i;
+
+       biv += i;
+       for (; i < max_num; ++i, ++biv, ++vbs) {
+               if (!biv->bi.ctx || biv->bi.scrubbed) {
+                       vbs->sid = SVGA3D_INVALID_ID;
+                       vbs->stride = 0;
+                       vbs->offset = 0;
+               } else {
+                       vbs->sid = biv->bi.res->id;
+                       vbs->stride = biv->stride;
+                       vbs->offset = biv->offset;
+               }
+               cbs->bind_cmd_count++;
+               if (next_bit == i) {
+                       next_bit = find_next_bit(dirty, max_num, i + 1);
+                       if (next_bit >= max_num)
+                               break;
+               }
+       }
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+       const struct vmw_ctx_bindinfo *loc =
+               &cbs->vertex_buffers[0].bi;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetVertexBuffers body;
+       } *cmd;
+       size_t cmd_size, set_vb_size;
+       const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+       vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+                            SVGA3D_DX_MAX_VERTEXBUFFERS);
+       if (cbs->bind_cmd_count == 0)
+               return 0;
+
+       set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+       cmd_size = sizeof(*cmd) + set_vb_size;
+       cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
+                         " binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+       cmd->header.size = sizeof(cmd->body) + set_vb_size;
+       cmd->body.startBuffer = cbs->bind_first_slot;
+
+       memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+       vmw_fifo_commit(ctx->dev_priv, cmd_size);
+       bitmap_clear(cbs->dirty_vb,
+                    cbs->bind_first_slot, cbs->bind_cmd_count);
+
+       return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+       int ret = 0;
+       unsigned long hit = 0;
+
+       while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+             < VMW_BINDING_NUM_BITS) {
+
+               switch (hit) {
+               case VMW_BINDING_RT_BIT:
+                       ret = vmw_emit_set_rt(cbs);
+                       break;
+               case VMW_BINDING_PS_BIT:
+                       ret = vmw_binding_emit_dirty_ps(cbs);
+                       break;
+               case VMW_BINDING_SO_BIT:
+                       ret = vmw_emit_set_so(cbs);
+                       break;
+               case VMW_BINDING_VB_BIT:
+                       ret = vmw_emit_set_vb(cbs);
+                       break;
+               default:
+                       BUG();
+               }
+               if (ret)
+                       return ret;
+
+               __clear_bit(hit, &cbs->dirty);
+               hit++;
+       }
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_sr - Schedule a DX shader resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_bindinfo_view *biv =
+               container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+       struct vmw_ctx_binding_state *cbs =
+               vmw_context_binding_state(bi->ctx);
+
+       __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
+       __set_bit(VMW_BINDING_PS_SR_BIT,
+                 &cbs->per_shader[biv->shader_slot].dirty);
+       __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_rt - Schedule a DX rendertarget binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_binding_state *cbs =
+               vmw_context_binding_state(bi->ctx);
+
+       __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Schedule a DX streamoutput target binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_binding_state *cbs =
+               vmw_context_binding_state(bi->ctx);
+
+       __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_vb - Schedule a DX vertex buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_bindinfo_vb *bivb =
+               container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+       struct vmw_ctx_binding_state *cbs =
+               vmw_context_binding_state(bi->ctx);
+
+       __set_bit(bivb->slot, cbs->dirty_vb);
+       __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
+
+       return 0;
+}
+
+/**
+ * vmw_binding_scrub_ib - scrub a DX index buffer binding from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+       struct vmw_ctx_bindinfo_ib *binding =
+               container_of(bi, typeof(*binding), bi);
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetIndexBuffer body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for DX index buffer "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+       cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
+       cmd->header.size = sizeof(cmd->body);
+       if (rebind) {
+               cmd->body.sid = bi->res->id;
+               cmd->body.format = binding->format;
+               cmd->body.offset = binding->offset;
+       } else {
+               cmd->body.sid = SVGA3D_INVALID_ID;
+               cmd->body.format = 0;
+               cmd->body.offset = 0;
+       }
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
+ * memory accounting.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Returns a pointer to a newly allocated struct or an error pointer on error.
+ */
+struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv)
+{
+       struct vmw_ctx_binding_state *cbs;
+       int ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
+                                  false, false);
+       if (ret)
+               return ERR_PTR(ret);
+
+       cbs = vzalloc(sizeof(*cbs));
+       if (!cbs) {
+               ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+               return ERR_PTR(-ENOMEM);
+       }
+
+       cbs->dev_priv = dev_priv;
+       INIT_LIST_HEAD(&cbs->list);
+
+       return cbs;
+}
+
+/**
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
+ * memory accounting info.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
+ */
+void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_private *dev_priv = cbs->dev_priv;
+
+       vfree(cbs);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+}
+
+/**
+ * vmw_binding_state_list - Get the binding list of a
+ * struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state
+ *
+ * Returns the binding list which can be used to traverse through the bindings
+ * and access the resource information of all bindings.
+ */
+struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
+{
+       return &cbs->list;
+}
+
+/**
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
+ *
+ * Drops all bindings registered in @cbs. No device binding actions are
+ * performed.
+ */
+void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_ctx_bindinfo *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+               vmw_binding_drop(entry);
+}
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_binding_build_asserts(void)
+{
+       BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
+       BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
+       BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
+
+       /*
+        * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
+        * view id arrays.
+        */
+       BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
+       BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
+       BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
+
+       /*
+        * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
+        * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
+        */
+       BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
+                    VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+       BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
+                    VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+}
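For readers outside the kernel tree: BUILD_BUG_ON() is the kernel's compile-time assert, morally equivalent to C11 static_assert, and costs nothing at run time. A stand-alone sketch of the same buffer-reuse assumption checked above (invented names):

#include <assert.h>	/* static_assert, C11 */
#include <stdint.h>

#define MAX_VIEW_BINDINGS 128
#define MAX_SOTARGETS 4

struct so_target { uint32_t sid, offset, size_in_bytes; };

/* The u32 scratch buffer must also fit the larger SO target records. */
static_assert(MAX_SOTARGETS * sizeof(struct so_target) <=
	      MAX_VIEW_BINDINGS * sizeof(uint32_t),
	      "scratch buffer too small for SO target data");

int main(void) { return 0; }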
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644 (file)
index 0000000..bf2e77a
--- /dev/null
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include "device_include/svga3d_reg.h"
+#include <linux/list.h>
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+       vmw_ctx_binding_shader,
+       vmw_ctx_binding_rt,
+       vmw_ctx_binding_tex,
+       vmw_ctx_binding_cb,
+       vmw_ctx_binding_dx_shader,
+       vmw_ctx_binding_dx_rt,
+       vmw_ctx_binding_sr,
+       vmw_ctx_binding_ds,
+       vmw_ctx_binding_so,
+       vmw_ctx_binding_vb,
+       vmw_ctx_binding_ib,
+       vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+       struct list_head ctx_list;
+       struct list_head res_list;
+       struct vmw_resource *ctx;
+       struct vmw_resource *res;
+       enum vmw_ctx_binding_type bt;
+       bool scrubbed;
+};
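The derived bindinfo structs below all embed this struct as their first member, and the scrub functions recover the full type with container_of(). A stand-alone model of that pattern (simplified types, compilable outside the kernel):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bindinfo { int bt; };
struct bindinfo_tex { struct bindinfo bi; unsigned texture_stage; };

/* A scrub callback receives the base pointer and recovers its type. */
static void scrub_texture(struct bindinfo *bi)
{
	struct bindinfo_tex *binding =
		container_of(bi, struct bindinfo_tex, bi);

	printf("scrubbing texture stage %u\n", binding->texture_stage);
}

int main(void)
{
	struct bindinfo_tex tex = { .bi = { .bt = 1 }, .texture_stage = 3 };

	scrub_texture(&tex.bi);
	return 0;
}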
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+       struct vmw_ctx_bindinfo bi;
+       uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+       struct vmw_ctx_bindinfo bi;
+       SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+       struct vmw_ctx_bindinfo bi;
+       SVGA3dShaderType shader_slot;
+       uint32 offset;
+       uint32 size;
+       uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+       struct vmw_ctx_bindinfo bi;
+       SVGA3dShaderType shader_slot;
+       uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+       struct vmw_ctx_bindinfo bi;
+       uint32 offset;
+       uint32 size;
+       uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+       struct vmw_ctx_bindinfo bi;
+       uint32 offset;
+       uint32 stride;
+       uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+       struct vmw_ctx_bindinfo bi;
+       uint32 offset;
+       uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffers: Constant buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource bindings changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+       struct vmw_ctx_bindinfo_shader shader;
+       struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+       struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+       DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+       unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+                           const struct vmw_ctx_bindinfo *ci,
+                           u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+                        struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
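+
+/*
+ * Example usage (an illustrative sketch only; the execbuf-side variable
+ * names are hypothetical): a command validator typically stages bindings
+ * on a scratch binding state and commits them to the context's state only
+ * once the whole command stream has validated:
+ *
+ *     scratch = vmw_binding_state_alloc(dev_priv);
+ *     vmw_binding_add(scratch, &binfo->bi, shader_slot, slot);
+ *     ...
+ *     vmw_binding_state_commit(ctx_cbs, scratch);
+ *     vmw_binding_state_free(scratch);
+ */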
+
+#endif
index 04fa8526b55eb41023e2cadb77ad3dd439c21bb3..5ae8f921da2a478bef55b617c3a28f66ed1e2773 100644 (file)
@@ -916,9 +916,8 @@ static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
 
        cur = man->cur;
        if (cur && (size + man->cur_pos > cur->size ||
-           (ctx_id != SVGA3D_INVALID_ID &&
-            (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
-            ctx_id != cur->cb_header->dxContext)))
+                   ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+                    ctx_id != cur->cb_header->dxContext)))
                __vmw_cmdbuf_cur_flush(man);
 
        if (!man->cur) {
index 21e9b7f8dad0b89aedeabae661cef83e6879952d..59d965f8b5304986d5fcbbd8779974e4226ff4e5 100644 (file)
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
-enum vmw_cmdbuf_res_state {
-       VMW_CMDBUF_RES_COMMITED,
-       VMW_CMDBUF_RES_ADD,
-       VMW_CMDBUF_RES_DEL
-};
-
 /**
  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
  *
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
 
        list_for_each_entry_safe(entry, next, list, head) {
                list_del(&entry->head);
+               if (entry->res->func->commit_notify)
+                       entry->res->func->commit_notify(entry->res,
+                                                       entry->state);
                switch (entry->state) {
                case VMW_CMDBUF_RES_ADD:
-                       entry->state = VMW_CMDBUF_RES_COMMITED;
+                       entry->state = VMW_CMDBUF_RES_COMMITTED;
                        list_add_tail(&entry->head, &entry->man->list);
                        break;
                case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
                                                 &entry->hash);
                        list_del(&entry->head);
                        list_add_tail(&entry->head, &entry->man->list);
-                       entry->state = VMW_CMDBUF_RES_COMMITED;
+                       entry->state = VMW_CMDBUF_RES_COMMITTED;
                        break;
                default:
                        BUG();
@@ -231,6 +229,9 @@ out_invalid_key:
  * @res_type: The resource type.
  * @user_key: The user-space id of the resource.
  * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * not ref-counted.
  *
  * This function looks up the struct vmw_cmdbuf_res entry from the manager
  * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ out_invalid_key:
 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
                          enum vmw_cmdbuf_res_type res_type,
                          u32 user_key,
-                         struct list_head *list)
+                         struct list_head *list,
+                         struct vmw_resource **res_p)
 {
        struct vmw_cmdbuf_res *entry;
        struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
        switch (entry->state) {
        case VMW_CMDBUF_RES_ADD:
                vmw_cmdbuf_res_free(man, entry);
+               *res_p = NULL;
                break;
-       case VMW_CMDBUF_RES_COMMITED:
+       case VMW_CMDBUF_RES_COMMITTED:
                (void) drm_ht_remove_item(&man->resources, &entry->hash);
                list_del(&entry->head);
                entry->state = VMW_CMDBUF_RES_DEL;
                list_add_tail(&entry->head, list);
+               *res_p = entry->res;
                break;
        default:
                BUG();
index 15f954423e7c7895b9a872177068dc40aac4a152..abfe67c893c77d7b884ef98f927cbfb35a9f122e 100644 (file)
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
-       struct vmw_ctx_binding_state cbs;
+       struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
+       struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+       spinlock_t cotable_lock;
 };
 
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +50,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-                                          bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +94,36 @@ static const struct vmw_res_func vmw_gb_context_func = {
        .unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-       [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-       [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-       [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+       .res_type = vmw_res_dx_context,
+       .needs_backup = true,
+       .may_evict = true,
+       .type_name = "dx contexts",
+       .backup_placement = &vmw_mob_placement,
+       .create = vmw_dx_context_create,
+       .destroy = vmw_dx_context_destroy,
+       .bind = vmw_dx_context_bind,
+       .unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+       struct vmw_resource *res;
+       int i;
+
+       for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
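+               /* Detach the pointer under the lock; drop the ref outside it. */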
+               spin_lock(&uctx->cotable_lock);
+               res = uctx->cotables[i];
+               uctx->cotables[i] = NULL;
+               spin_unlock(&uctx->cotable_lock);
+               vmw_resource_unreference(&res);
+       }
+}
+
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
        struct vmw_user_context *uctx =
@@ -113,17 +135,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
        } *cmd;
 
 
-       if (res->func->destroy == vmw_gb_context_destroy) {
+       if (res->func->destroy == vmw_gb_context_destroy ||
+           res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
-               (void) vmw_context_binding_state_kill(&uctx->cbs);
-               (void) vmw_gb_context_destroy(res);
+               vmw_binding_state_kill(uctx->cbs);
+               (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
+               vmw_context_cotables_unref(uctx);
                return;
        }
 
@@ -144,16 +168,20 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 }
 
 static int vmw_gb_context_init(struct vmw_private *dev_priv,
+                              bool dx,
                               struct vmw_resource *res,
-                              void (*res_free) (struct vmw_resource *res))
+                              void (*res_free)(struct vmw_resource *res))
 {
-       int ret;
+       int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
 
+       res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+                           SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
-                               res_free, &vmw_gb_context_func);
-       res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+                               res_free,
+                               dx ? &vmw_dx_context_func :
+                               &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;
 
@@ -166,12 +194,32 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
                }
        }
 
-       memset(&uctx->cbs, 0, sizeof(uctx->cbs));
-       INIT_LIST_HEAD(&uctx->cbs.list);
+       uctx->cbs = vmw_binding_state_alloc(dev_priv);
+       if (IS_ERR(uctx->cbs)) {
+               ret = PTR_ERR(uctx->cbs);
+               goto out_err;
+       }
+
+       spin_lock_init(&uctx->cotable_lock);
+
+       if (dx) {
+               for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+                       uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+                                                             &uctx->res, i);
+                       if (unlikely(uctx->cotables[i] == NULL)) {
+                               ret = -ENOMEM;
+                               goto out_cotables;
+                       }
+               }
+       }
 
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
 
+out_cotables:
+       vmw_context_cotables_unref(uctx);
 out_err:
        if (res_free)
                res_free(res);
@@ -182,7 +230,8 @@ out_err:
 
 static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
-                           void (*res_free) (struct vmw_resource *res))
+                           void (*res_free)(struct vmw_resource *res),
+                           bool dx)
 {
        int ret;
 
@@ -192,7 +241,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
        } *cmd;
 
        if (dev_priv->has_mob)
-               return vmw_gb_context_init(dev_priv, res, res_free);
+               return vmw_gb_context_init(dev_priv, dx, res, res_free);
 
        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);
@@ -232,19 +281,10 @@ out_early:
        return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-       struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-       int ret;
-
-       if (unlikely(res == NULL))
-               return NULL;
-
-       ret = vmw_context_init(dev_priv, res, NULL);
-
-       return (ret == 0) ? res : NULL;
-}
 
+/*
+ * GB context.
+ */
 
 static int vmw_gb_context_create(struct vmw_resource *res)
 {
@@ -309,7 +349,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
                          "binding.\n");
                return -ENOMEM;
        }
-
        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
@@ -346,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
        mutex_lock(&dev_priv->binding_mutex);
-       vmw_context_binding_state_scrub(&uctx->cbs);
+       vmw_binding_state_scrub(uctx->cbs);
 
        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -419,6 +458,221 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
        return 0;
 }
 
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       int ret;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXDefineContext body;
+       } *cmd;
+
+       if (likely(res->id != -1))
+               return 0;
+
+       ret = vmw_resource_alloc_id(res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a context id.\n");
+               goto out_no_id;
+       }
+
+       if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+               ret = -EBUSY;
+               goto out_no_fifo;
+       }
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "creation.\n");
+               ret = -ENOMEM;
+               goto out_no_fifo;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = res->id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_fifo_resource_inc(dev_priv);
+
+       return 0;
+
+out_no_fifo:
+       vmw_resource_release_id(res);
+out_no_id:
+       return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXBindContext body;
+       } *cmd;
+       struct ttm_buffer_object *bo = val_buf->bo;
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = res->id;
+       cmd->body.mobid = bo->mem.start;
+       cmd->body.validContents = res->backup_dirty;
+       res->backup_dirty = false;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * COtables must be unbound before their context, but unbinding requires
+ * the backup buffer being reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+                                  bool readback)
+{
+       struct vmw_user_context *uctx =
+               container_of(ctx, struct vmw_user_context, res);
+       int i;
+
+       vmw_binding_state_scrub(uctx->cbs);
+       for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+               struct vmw_resource *res;
+
+               /* Avoid racing with ongoing cotable destruction. */
+               spin_lock(&uctx->cotable_lock);
+               res = uctx->cotables[vmw_cotable_scrub_order[i]];
+               if (res)
+                       res = vmw_resource_reference_unless_doomed(res);
+               spin_unlock(&uctx->cotable_lock);
+               if (!res)
+                       continue;
+
+               WARN_ON(vmw_cotable_scrub(res, readback));
+               vmw_resource_unreference(&res);
+       }
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct ttm_buffer_object *bo = val_buf->bo;
+       struct vmw_fence_obj *fence;
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXReadbackContext body;
+       } *cmd1;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXBindContext body;
+       } *cmd2;
+       uint32_t submit_size;
+       uint8_t *cmd;
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       mutex_lock(&dev_priv->binding_mutex);
+       vmw_dx_context_scrub_cotables(res, readback);
+
+       submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "unbinding.\n");
+               mutex_unlock(&dev_priv->binding_mutex);
+               return -ENOMEM;
+       }
+
+       cmd2 = (void *) cmd;
+       if (readback) {
+               cmd1 = (void *) cmd;
+               cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+               cmd1->header.size = sizeof(cmd1->body);
+               cmd1->body.cid = res->id;
+               cmd2 = (void *) (&cmd1[1]);
+       }
+       cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+       cmd2->header.size = sizeof(cmd2->body);
+       cmd2->body.cid = res->id;
+       cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+       vmw_fifo_commit(dev_priv, submit_size);
+       mutex_unlock(&dev_priv->binding_mutex);
+
+       /*
+        * Create a fence object and fence the backup buffer.
+        */
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+
+       vmw_fence_single_bo(bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXDestroyContext body;
+       } *cmd;
+
+       if (likely(res->id == -1))
+               return 0;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "destruction.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = res->id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       if (dev_priv->query_cid == res->id)
+               dev_priv->query_cid_valid = false;
+       vmw_resource_release_id(res);
+       vmw_fifo_resource_dec(dev_priv);
+
+       return 0;
+}
+
 /**
  * User-space context management:
  */
@@ -435,6 +689,8 @@ static void vmw_user_context_free(struct vmw_resource *res)
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
 
+       if (ctx->cbs)
+               vmw_binding_state_free(ctx->cbs);
        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
@@ -465,8 +721,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv, bool dx)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
@@ -476,6 +732,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
 
+       if (!dev_priv->has_dx && dx) {
+               DRM_ERROR("DX contexts not supported by device.\n");
+               return -EINVAL;
+       }
 
        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +776,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
         * From here on, the destructor takes over resource freeing.
         */
 
-       ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+       ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;
 
@@ -535,387 +795,74 @@ out_err:
 out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
-
-}
-
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
-{
-       struct vmw_private *dev_priv = bi->ctx->dev_priv;
-       struct {
-               SVGA3dCmdHeader header;
-               SVGA3dCmdSetShader body;
-       } *cmd;
-
-       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for shader "
-                         "unbinding.\n");
-               return -ENOMEM;
-       }
-
-       cmd->header.id = SVGA_3D_CMD_SET_SHADER;
-       cmd->header.size = sizeof(cmd->body);
-       cmd->body.cid = bi->ctx->id;
-       cmd->body.type = bi->i1.shader_type;
-       cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-       vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-       return 0;
-}
-
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-                                          bool rebind)
-{
-       struct vmw_private *dev_priv = bi->ctx->dev_priv;
-       struct {
-               SVGA3dCmdHeader header;
-               SVGA3dCmdSetRenderTarget body;
-       } *cmd;
-
-       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for render target "
-                         "unbinding.\n");
-               return -ENOMEM;
-       }
-
-       cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
-       cmd->header.size = sizeof(cmd->body);
-       cmd->body.cid = bi->ctx->id;
-       cmd->body.type = bi->i1.rt_type;
-       cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-       cmd->body.target.face = 0;
-       cmd->body.target.mipmap = 0;
-       vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-       return 0;
-}
-
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
-                                    bool rebind)
-{
-       struct vmw_private *dev_priv = bi->ctx->dev_priv;
-       struct {
-               SVGA3dCmdHeader header;
-               struct {
-                       SVGA3dCmdSetTextureState c;
-                       SVGA3dTextureState s1;
-               } body;
-       } *cmd;
-
-       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for texture "
-                         "unbinding.\n");
-               return -ENOMEM;
-       }
-
-
-       cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
-       cmd->header.size = sizeof(cmd->body);
-       cmd->body.c.cid = bi->ctx->id;
-       cmd->body.s1.stage = bi->i1.texture_stage;
-       cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-       cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-       vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-       return 0;
 }
 
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
 {
-       list_del(&cb->ctx_list);
-       if (!list_empty(&cb->res_list))
-               list_del(&cb->res_list);
-       cb->bi.ctx = NULL;
+       return vmw_context_define(dev, data, file_priv, false);
 }
 
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-                           const struct vmw_ctx_bindinfo *bi)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file_priv)
 {
-       struct vmw_ctx_binding *loc;
-
-       switch (bi->bt) {
-       case vmw_ctx_binding_rt:
-               if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
-                       DRM_ERROR("Illegal render target type %u.\n",
-                                 (unsigned) bi->i1.rt_type);
-                       return -EINVAL;
-               }
-               loc = &cbs->render_targets[bi->i1.rt_type];
-               break;
-       case vmw_ctx_binding_tex:
-               if (unlikely((unsigned)bi->i1.texture_stage >=
-                            SVGA3D_NUM_TEXTURE_UNITS)) {
-                       DRM_ERROR("Illegal texture/sampler unit %u.\n",
-                                 (unsigned) bi->i1.texture_stage);
-                       return -EINVAL;
-               }
-               loc = &cbs->texture_units[bi->i1.texture_stage];
-               break;
-       case vmw_ctx_binding_shader:
-               if (unlikely((unsigned)bi->i1.shader_type >=
-                            SVGA3D_SHADERTYPE_PREDX_MAX)) {
-                       DRM_ERROR("Illegal shader type %u.\n",
-                                 (unsigned) bi->i1.shader_type);
-                       return -EINVAL;
-               }
-               loc = &cbs->shaders[bi->i1.shader_type];
-               break;
+       union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+       struct drm_vmw_context_arg *rep = &arg->rep;
+
+       switch (arg->req) {
+       case drm_vmw_context_legacy:
+               return vmw_context_define(dev, rep, file_priv, false);
+       case drm_vmw_context_dx:
+               return vmw_context_define(dev, rep, file_priv, true);
        default:
-               BUG();
-       }
-
-       if (loc->bi.ctx != NULL)
-               vmw_context_binding_drop(loc);
-
-       loc->bi = *bi;
-       loc->bi.scrubbed = false;
-       list_add_tail(&loc->ctx_list, &cbs->list);
-       INIT_LIST_HEAD(&loc->res_list);
-
-       return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
-                                        const struct vmw_ctx_bindinfo *bi)
-{
-       struct vmw_ctx_binding *loc;
-
-       switch (bi->bt) {
-       case vmw_ctx_binding_rt:
-               loc = &cbs->render_targets[bi->i1.rt_type];
-               break;
-       case vmw_ctx_binding_tex:
-               loc = &cbs->texture_units[bi->i1.texture_stage];
-               break;
-       case vmw_ctx_binding_shader:
-               loc = &cbs->shaders[bi->i1.shader_type];
                break;
-       default:
-               BUG();
-       }
-
-       if (loc->bi.ctx != NULL)
-               vmw_context_binding_drop(loc);
-
-       if (bi->res != NULL) {
-               loc->bi = *bi;
-               list_add_tail(&loc->ctx_list, &cbs->list);
-               list_add_tail(&loc->res_list, &bi->res->binding_head);
-       }
-}
-
-/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
-       if (!cb->bi.scrubbed) {
-               (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
-               cb->bi.scrubbed = true;
-       }
-       vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
- */
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
-{
-       struct vmw_ctx_binding *entry, *next;
-
-       list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
-               vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
-{
-       struct vmw_ctx_binding *entry;
-
-       list_for_each_entry(entry, &cbs->list, ctx_list) {
-               if (!entry->bi.scrubbed) {
-                       (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-                       entry->bi.scrubbed = true;
-               }
-       }
-}
-
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
-{
-       struct vmw_ctx_binding *entry, *next;
-
-       list_for_each_entry_safe(entry, next, head, res_list)
-               vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
- */
-void vmw_context_binding_res_list_scrub(struct list_head *head)
-{
-       struct vmw_ctx_binding *entry;
-
-       list_for_each_entry(entry, head, res_list) {
-               if (!entry->bi.scrubbed) {
-                       (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-                       entry->bi.scrubbed = true;
-               }
        }
+       return -EINVAL;
 }
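+
+/*
+ * Example (userspace-side sketch, purely illustrative): requesting a DX
+ * context through the extended ioctl using libdrm:
+ *
+ *     union drm_vmw_extended_context_arg arg = {
+ *             .req = drm_vmw_context_dx,
+ *     };
+ *     ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
+ *                               &arg, sizeof(arg));
+ *     if (ret == 0)
+ *             context_id = arg.rep.cid;
+ */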
 
 /**
- * vmw_context_binding_state_transfer - Commit staged binding info
+ * vmw_context_binding_list - Return a list of context bindings
  *
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
+ * @ctx: The context resource
  *
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
  */
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
-                                       struct vmw_ctx_binding_state *from)
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
-       struct vmw_ctx_binding *entry, *next;
 
-       list_for_each_entry_safe(entry, next, &from->list, ctx_list)
-               vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+       return vmw_binding_state_list(uctx->cbs);
 }
 
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 {
-       struct vmw_ctx_binding *entry;
-       struct vmw_user_context *uctx =
-               container_of(ctx, struct vmw_user_context, res);
-       struct vmw_ctx_binding_state *cbs = &uctx->cbs;
-       int ret;
-
-       list_for_each_entry(entry, &cbs->list, ctx_list) {
-               if (likely(!entry->bi.scrubbed))
-                       continue;
-
-               if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
-                           SVGA3D_INVALID_ID))
-                       continue;
-
-               ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
-               if (unlikely(ret != 0))
-                       return ret;
+       return container_of(ctx, struct vmw_user_context, res)->man;
+}
 
-               entry->bi.scrubbed = false;
-       }
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+                                        SVGACOTableType cotable_type)
+{
+       if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+               return ERR_PTR(-EINVAL);
 
-       return 0;
+       return vmw_resource_reference
+               (container_of(ctx, struct vmw_user_context, res)->
+                cotables[cotable_type]);
 }
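+
+/*
+ * Example (an illustrative sketch; the surrounding validation variables
+ * are hypothetical): command validation that encounters a view define
+ * would look up the context's cotable, notify it of the new id and drop
+ * the reference again:
+ *
+ *     struct vmw_resource *cotable =
+ *             vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
+ *     if (!IS_ERR(cotable)) {
+ *             ret = vmw_cotable_notify(cotable, view_id);
+ *             vmw_resource_unreference(&cotable);
+ *     }
+ */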
 
 /**
- * vmw_context_binding_list - Return a list of context bindings
+ * vmw_context_binding_state - Return a pointer to a context binding state
+ * structure
  *
  * @ctx: The context resource
  *
- * Returns the current list of bindings of the given context. Note that
- * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
  */
-struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
-{
-       return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
-}
-
-struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
 {
-       return container_of(ctx, struct vmw_user_context, res)->man;
+       return container_of(ctx, struct vmw_user_context, res)->cbs;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644 (file)
index 0000000..22bb04f
--- /dev/null
@@ -0,0 +1,662 @@
+/**************************************************************************
+ *
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context OTables as resources to make use of the resource
+ * backing MOB eviction mechanism, which is used to read back the COTable
+ * whenever the backing MOB is evicted.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Highest entry id seen in the command stream for this
+ * cotable.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+       struct vmw_resource res;
+       struct vmw_resource *ctx;
+       size_t size_read_back;
+       int seen_entries;
+       u32 type;
+       bool scrubbed;
+       struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Min number of initial entries at cotable allocation
+ * for this cotable type.
+ * @size: Size of each entry.
+ * @unbind_func: Callback used at scrub time to detach or destroy the
+ * resources backed by this cotable type, if any.
+ */
+struct vmw_cotable_info {
+       u32 min_initial_entries;
+       u32 size;
+       void (*unbind_func)(struct vmw_private *, struct list_head *,
+                           bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+       {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+       {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+       {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+       {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+       {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+       {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+       {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+       {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+       {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+       {1, sizeof(SVGACOTableDXQueryEntry), NULL},
+       {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first;
+ * otherwise the device will swap in an invalid context if we remove
+ * bindings before scrubbing a cotable.
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+       SVGA_COTABLE_RTVIEW,
+       SVGA_COTABLE_DSVIEW,
+       SVGA_COTABLE_SRVIEW,
+       SVGA_COTABLE_DXSHADER,
+       SVGA_COTABLE_ELEMENTLAYOUT,
+       SVGA_COTABLE_BLENDSTATE,
+       SVGA_COTABLE_DEPTHSTENCIL,
+       SVGA_COTABLE_RASTERIZERSTATE,
+       SVGA_COTABLE_SAMPLER,
+       SVGA_COTABLE_STREAMOUTPUT,
+       SVGA_COTABLE_DXQUERY,
+};
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+                           struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+                             bool readback,
+                             struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+       .res_type = vmw_res_cotable,
+       .needs_backup = true,
+       .may_evict = true,
+       .type_name = "context guest backed object tables",
+       .backup_placement = &vmw_mob_placement,
+       .create = vmw_cotable_create,
+       .destroy = vmw_cotable_destroy,
+       .bind = vmw_cotable_bind,
+       .unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+       return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+       res->id = -1;
+       return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is identical to bind() except the function interface looks different.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+       struct vmw_cotable *vcotbl = vmw_cotable(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct ttm_buffer_object *bo = &res->backup->base;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetCOTable body;
+       } *cmd;
+
+       WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+       lockdep_assert_held(&bo->resv->lock.base);
+
+       cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
+       if (!cmd) {
+               DRM_ERROR("Failed reserving FIFO space for cotable "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+
+       WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+       WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+       cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = vcotbl->ctx->id;
+       cmd->body.type = vcotbl->type;
+       cmd->body.mobid = bo->mem.start;
+       cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+       vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+       vcotbl->scrubbed = false;
+
+       return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+                           struct ttm_validate_buffer *val_buf)
+{
+       /*
+        * The create() callback may have changed @res->backup without
+        * the caller noticing, and with val_buf->bo still pointing to
+        * the old backup buffer. Although hackish, and not used currently,
+        * take the opportunity to correct the value here so that it's not
+        * misused in the future.
+        */
+       val_buf->bo = &res->backup->base;
+
+       return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to initiate a readback of the cotable data to the
+ * backup buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions.
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ *    be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ *    resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+       struct vmw_cotable *vcotbl = vmw_cotable(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+       size_t submit_size;
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXReadbackCOTable body;
+       } *cmd0;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetCOTable body;
+       } *cmd1;
+
+       if (vcotbl->scrubbed)
+               return 0;
+
+       if (co_info[vcotbl->type].unbind_func)
+               co_info[vcotbl->type].unbind_func(dev_priv,
+                                                 &vcotbl->resource_list,
+                                                 readback);
+       submit_size = sizeof(*cmd1);
+       if (readback)
+               submit_size += sizeof(*cmd0);
+
+       cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
+       if (!cmd1) {
+               DRM_ERROR("Failed reserving FIFO space for cotable "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       vcotbl->size_read_back = 0;
+       if (readback) {
+               cmd0 = (void *) cmd1;
+               cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+               cmd0->header.size = sizeof(cmd0->body);
+               cmd0->body.cid = vcotbl->ctx->id;
+               cmd0->body.type = vcotbl->type;
+               cmd1 = (void *) &cmd0[1];
+               vcotbl->size_read_back = res->backup_size;
+       }
+       cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+       cmd1->header.size = sizeof(cmd1->body);
+       cmd1->body.cid = vcotbl->ctx->id;
+       cmd1->body.type = vcotbl->type;
+       cmd1->body.mobid = SVGA3D_INVALID_ID;
+       cmd1->body.validSizeInBytes = 0;
+       vmw_fifo_commit_flush(dev_priv, submit_size);
+       vcotbl->scrubbed = true;
+
+       /* Trigger a create() on next validate. */
+       res->id = -1;
+
+       return 0;
+}
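+
+/*
+ * The two restrictions above pair up roughly as follows (a sketch; the
+ * swapout sequence shown is hypothetical):
+ *
+ *     vmw_cotable_scrub(res, true);   device forgets the cotable,
+ *                                     contents read back to the backup
+ *     ...context swapped out and later validated again...
+ *     vmw_cotable_unscrub(res);       rebind before GPU use
+ */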
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+                             bool readback,
+                             struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_cotable *vcotbl = vmw_cotable(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct ttm_buffer_object *bo = val_buf->bo;
+       struct vmw_fence_obj *fence;
+
+       if (list_empty(&res->mob_head))
+               return 0;
+
+       WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+       lockdep_assert_held(&bo->resv->lock.base);
+
+       mutex_lock(&dev_priv->binding_mutex);
+       if (!vcotbl->scrubbed)
+               vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+       mutex_unlock(&dev_priv->binding_mutex);
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+       vmw_fence_single_bo(bo, fence);
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without unbinding the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+       struct vmw_cotable *vcotbl = vmw_cotable(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXReadbackCOTable body;
+       } *cmd;
+       struct vmw_fence_obj *fence;
+
+       if (!vcotbl->scrubbed) {
+               cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+                                         SVGA3D_INVALID_ID);
+               if (!cmd) {
+                       DRM_ERROR("Failed reserving FIFO space for cotable "
+                                 "readback.\n");
+                       return -ENOMEM;
+               }
+               cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+               cmd->header.size = sizeof(cmd->body);
+               cmd->body.cid = vcotbl->ctx->id;
+               cmd->body.type = vcotbl->type;
+               vcotbl->size_read_back = res->backup_size;
+               vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       }
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+       vmw_fence_single_bo(&res->backup->base, fence);
+       vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_cotable *vcotbl = vmw_cotable(res);
+       struct vmw_dma_buffer *buf, *old_buf = res->backup;
+       struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+       size_t old_size = res->backup_size;
+       size_t old_size_read_back = vcotbl->size_read_back;
+       size_t cur_size_read_back;
+       struct ttm_bo_kmap_obj old_map, new_map;
+       int ret;
+       size_t i;
+
+       ret = vmw_cotable_readback(res);
+       if (ret)
+               return ret;
+
+       cur_size_read_back = vcotbl->size_read_back;
+       vcotbl->size_read_back = old_size_read_back;
+
+       /*
+        * While the device is processing, allocate and reserve a buffer object
+        * for the new COTable. Initially pin the buffer object to make sure
+        * we can use tryreserve without failure.
+        */
+       buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+                             true, vmw_dmabuf_bo_free);
+       if (ret) {
+               DRM_ERROR("Failed initializing new cotable MOB.\n");
+               return ret;
+       }
+
+       bo = &buf->base;
+       WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+
+       ret = ttm_bo_wait(old_bo, false, false, false);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed waiting for cotable unbind.\n");
+               goto out_wait;
+       }
+
+       /*
+        * Do a page-by-page copy of the COTable. This eliminates slow vmap()s.
+        * This should really be a TTM utility.
+        */
+       for (i = 0; i < old_bo->num_pages; ++i) {
+               bool dummy;
+
+               ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed mapping old COTable on resize.\n");
+                       goto out_wait;
+               }
+               ret = ttm_bo_kmap(bo, i, 1, &new_map);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed mapping new COTable on resize.\n");
+                       goto out_map_new;
+               }
+               memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+                      ttm_kmap_obj_virtual(&old_map, &dummy),
+                      PAGE_SIZE);
+               ttm_bo_kunmap(&new_map);
+               ttm_bo_kunmap(&old_map);
+       }
+
+       /* Unpin new buffer, and switch backup buffers. */
+       ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed validating new COTable backup buffer.\n");
+               goto out_wait;
+       }
+
+       res->backup = buf;
+       res->backup_size = new_size;
+       vcotbl->size_read_back = cur_size_read_back;
+
+       /*
+        * Now tell the device to switch. If this fails, then we need to
+        * revert the full resize.
+        */
+       ret = vmw_cotable_unscrub(res);
+       if (ret) {
+               DRM_ERROR("Failed switching COTable backup buffer.\n");
+               res->backup = old_buf;
+               res->backup_size = old_size;
+               vcotbl->size_read_back = old_size_read_back;
+               goto out_wait;
+       }
+
+       /* Let go of the old mob. */
+       list_del(&res->mob_head);
+       list_add_tail(&res->mob_head, &buf->res_list);
+       vmw_dmabuf_unreference(&old_buf);
+       res->id = vcotbl->type;
+
+       return 0;
+
+out_map_new:
+       ttm_bo_kunmap(&old_map);
+out_wait:
+       ttm_bo_unreserve(bo);
+       vmw_dmabuf_unreference(&buf);
+
+       return ret;
+}
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence is instead used for two
+ * things.
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ *    buffer, that is, if @res->mob_head is non-empty.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+       struct vmw_cotable *vcotbl = vmw_cotable(res);
+       size_t new_size = res->backup_size;
+       size_t needed_size;
+       int ret;
+
+       /* Check whether we need to resize the cotable */
+       needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
+       while (needed_size > new_size)
+               new_size *= 2;
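+       /*
+        * Example with illustrative numbers: a PAGE_SIZE (4096-byte) backup
+        * holding hypothetical 64-byte entries overflows at entry id 64,
+        * since (64 + 1) * 64 = 4160 > 4096; new_size then doubles once to
+        * 8192 and vmw_cotable_resize() is called below.
+        */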
+
+       if (likely(new_size <= res->backup_size)) {
+               if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+                       ret = vmw_cotable_unscrub(res);
+                       if (ret)
+                               return ret;
+               }
+               res->id = vcotbl->type;
+               return 0;
+       }
+
+       return vmw_cotable_resize(res, new_size);
+}
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final (part of resource destruction) destroy callback.
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+       (void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       kfree(res);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the owning context resource; the cotable does not take
+ * a refcount on it.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+                                      struct vmw_resource *ctx,
+                                      u32 type)
+{
+       struct vmw_cotable *vcotbl;
+       int ret;
+       u32 num_entries;
+
+       if (unlikely(cotable_acc_size == 0))
+               cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  cotable_acc_size, false, true);
+       if (unlikely(ret))
+               return ERR_PTR(ret);
+
+       vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+       if (unlikely(vcotbl == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_alloc;
+       }
+
+       ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+                               vmw_cotable_free, &vmw_cotable_func);
+       if (unlikely(ret != 0))
+               goto out_no_init;
+
+       INIT_LIST_HEAD(&vcotbl->resource_list);
+       vcotbl->res.id = type;
+       vcotbl->res.backup_size = PAGE_SIZE;
+       num_entries = PAGE_SIZE / co_info[type].size;
+       if (num_entries < co_info[type].min_initial_entries) {
+               vcotbl->res.backup_size = co_info[type].min_initial_entries *
+                       co_info[type].size;
+               vcotbl->res.backup_size =
+                       (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+       }
+
+       vcotbl->scrubbed = true;
+       vcotbl->seen_entries = -1;
+       vcotbl->type = type;
+       vcotbl->ctx = ctx;
+
+       vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+
+       return &vcotbl->res;
+
+out_no_init:
+       kfree(vcotbl);
+out_no_alloc:
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+       return ERR_PTR(ret);
+}
+
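vmw_cotable_alloc() sizes the initial backup to one page, bumping it to cover min_initial_entries and rounding up to a page boundary with the usual mask trick. A standalone sketch, assuming a 4 KiB page and hypothetical entry parameters:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size = 112;               /* hypothetical entry size */
	unsigned long min_initial_entries = 64; /* hypothetical minimum */
	unsigned long backup_size = PAGE_SIZE;

	if (PAGE_SIZE / size < min_initial_entries) {
		backup_size = min_initial_entries * size;        /* 7168 */
		backup_size = (backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	}
	printf("backup_size = %lu\n", backup_size);              /* 8192 */
	return 0;
}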
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+       struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+       if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+               DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+                         (unsigned) vcotbl->type, id);
+               return -EINVAL;
+       }
+
+       if (vcotbl->seen_entries < id) {
+               /* Trigger a call to create() on next validate */
+               res->id = -1;
+               vcotbl->seen_entries = id;
+       }
+
+       return 0;
+}
+
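Callers are expected to poke the matching cotable whenever they schedule creation of a cotable-backed item, so an undersized table is grown at the next validation rather than when the device rejects the command. A hedged sketch of such a call site using this patch's API (error handling trimmed; the shader cotable is just an example):

	struct vmw_resource *cotable;

	/* Grow the DX shader cotable if 'id' lies beyond its current end. */
	cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
	if (!IS_ERR(cotable))
		(void) vmw_cotable_notify(cotable, id);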
+/**
+ * vmw_cotable_add_resource - Add a resource to the cotable's list of active
+ * resources.
+ *
+ * @res: Pointer to the struct vmw_resource representing the cotable.
+ * @head: Pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
+ */
+void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
+{
+       struct vmw_cotable *vcotbl =
+               container_of(res, struct vmw_cotable, res);
+
+       list_add_tail(head, &vcotbl->resource_list);
+}
index b83adea43f3a7243c8fa72046dc5e00b761c3a05..fd0cb8c67d05eb9c23f1be9ba1ad53a6fc547c40 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
 #define DRM_IOCTL_VMW_SYNCCPU                                  \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,             \
                 struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT                  \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,    \
+               struct drm_vmw_context_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-       VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_dmabuf_synccpu_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+                     vmw_extended_context_define_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -390,8 +397,10 @@ static int vmw_request_device(struct vmw_private *dev_priv)
        }
        vmw_fence_fifo_up(dev_priv->fman);
        dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
-       if (IS_ERR(dev_priv->cman))
+       if (IS_ERR(dev_priv->cman)) {
                dev_priv->cman = NULL;
+               dev_priv->has_dx = false;
+       }
 
        ret = vmw_request_device_late(dev_priv);
        if (ret)
@@ -848,6 +857,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                }
        }
 
+       if (dev_priv->has_mob) {
+               spin_lock(&dev_priv->cap_lock);
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+               dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+               spin_unlock(&dev_priv->cap_lock);
+       }
+
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
@@ -857,6 +874,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        if (ret)
                goto out_no_fifo;
 
+       DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
        if (dev_priv->enable_fb) {
                vmw_fifo_resource_inc(dev_priv);
                vmw_svga_enable(dev_priv);
@@ -900,6 +919,8 @@ out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);
 
+       if (dev_priv->ctx.staged_bindings)
+               vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        kfree(dev_priv);
        return ret;
 }
@@ -945,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev)
        iounmap(dev_priv->mmio_virt);
        arch_phys_wc_del(dev_priv->mmio_mtrr);
        (void)ttm_bo_device_release(&dev_priv->bdev);
+       if (dev_priv->ctx.staged_bindings)
+               vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        vmw_ttm_global_release(dev_priv);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -1082,11 +1105,21 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                const struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-               if (unlikely(ioctl->cmd != cmd)) {
-                       DRM_ERROR("Invalid command format, ioctl %d\n",
-                                 nr - DRM_COMMAND_BASE);
-                       return -EINVAL;
+               if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+                       ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+                       if (unlikely(ret != 0))
+                               return ret;
+
+                       if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+                               goto out_io_encoding;
+
+                       return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+                                                       _IOC_SIZE(cmd));
                }
+
+               if (unlikely(ioctl->cmd != cmd))
+                       goto out_io_encoding;
+
                flags = ioctl->flags;
        } else if (!drm_ioctl_flags(nr, &flags))
                return -EINVAL;
@@ -1106,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                ttm_read_unlock(&vmaster->lock);
 
        return ret;
+
+out_io_encoding:
+       DRM_ERROR("Invalid command format, ioctl %d\n",
+                 nr - DRM_COMMAND_BASE);
+
+       return -EINVAL;
 }
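The EXECBUF special case above deliberately skips the exact ioctl->cmd match: only the direction bits must encode a write-only ioctl, while the encoded size may differ between drm_vmw_execbuf_arg revisions and is passed on to vmw_execbuf_ioctl() instead. A small userspace illustration of the _IOC fields involved (the 'X'/1 encoding is made up for the demo):

#include <stdio.h>
#include <linux/ioctl.h>

int main(void)
{
	unsigned int cmd = _IOW('X', 1, int);	/* hypothetical ioctl */

	if ((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)
		printf("bad encoding\n");	/* would be rejected */
	else
		printf("write-only, payload %u bytes\n", _IOC_SIZE(cmd));
	return 0;
}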
 
 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1156,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev,
        kfree(vmaster);
 }
 
-
 static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
index f513e444125ddbd013ffcdb129723e0515a1f518..b88ea50b7d9592762af2e9869b9e298b0ab56be0 100644 (file)
@@ -59,6 +59,8 @@
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
                        VMWGFX_NUM_GB_SHADER +\
                        VMWGFX_NUM_GB_SURFACE +\
@@ -132,6 +134,9 @@ enum vmw_res_type {
        vmw_res_surface,
        vmw_res_stream,
        vmw_res_shader,
+       vmw_res_dx_context,
+       vmw_res_cotable,
+       vmw_res_view,
        vmw_res_max
 };
 
@@ -139,7 +144,8 @@ enum vmw_res_type {
  * Resources that are managed using command streams.
  */
 enum vmw_cmdbuf_res_type {
-       vmw_cmdbuf_res_compat_shader
+       vmw_cmdbuf_res_shader,
+       vmw_cmdbuf_res_view
 };
 
 struct vmw_cmdbuf_res_manager;
@@ -162,11 +168,13 @@ struct vmw_surface {
        struct drm_vmw_size *sizes;
        uint32_t num_sizes;
        bool scanout;
+       uint32_t array_size;
        /* TODO so far just an extra pointer */
        struct vmw_cursor_snooper snooper;
        struct vmw_surface_offset *offsets;
        SVGA3dTextureFilter autogen_filter;
        uint32_t multisample_count;
+       struct list_head view_list;
 };
 
 struct vmw_marker_queue {
@@ -186,6 +194,7 @@ struct vmw_fifo_state {
        struct mutex fifo_mutex;
        struct rw_semaphore rwsem;
        struct vmw_marker_queue marker_queue;
+       bool dx;
 };
 
 struct vmw_relocation {
@@ -265,73 +274,6 @@ struct vmw_piter {
        struct page *(*page)(struct vmw_piter *);
 };
 
-/*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
- */
-enum vmw_ctx_binding_type {
-       vmw_ctx_binding_shader,
-       vmw_ctx_binding_rt,
-       vmw_ctx_binding_tex,
-       vmw_ctx_binding_max
-};
-
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
-       struct vmw_resource *ctx;
-       struct vmw_resource *res;
-       enum vmw_ctx_binding_type bt;
-       bool scrubbed;
-       union {
-               SVGA3dShaderType shader_type;
-               SVGA3dRenderTargetType rt_type;
-               uint32 texture_stage;
-       } i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- *                        - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
-       struct list_head ctx_list;
-       struct list_head res_list;
-       struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
-       struct list_head list;
-       struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
-       struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
-       struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_PREDX_MAX];
-};
-
-
 /*
  * enum vmw_display_unit_type - Describes the display unit
  */
@@ -356,6 +298,7 @@ struct vmw_sw_context{
        uint32_t *cmd_bounce;
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
+       struct list_head ctx_resource_list; /* For contexts and cotables */
        struct vmw_dma_buffer *cur_query_bo;
        struct list_head res_relocations;
        uint32_t *buf_start;
@@ -363,8 +306,13 @@ struct vmw_sw_context{
        struct vmw_resource *last_query_ctx;
        bool needs_post_query_barrier;
        struct vmw_resource *error_resource;
-       struct vmw_ctx_binding_state staged_bindings;
+       struct vmw_ctx_binding_state *staged_bindings;
+       bool staged_bindings_inuse;
        struct list_head staged_cmd_res;
+       struct vmw_resource_val_node *dx_ctx_node;
+       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_resource *dx_query_ctx;
+       struct vmw_cmdbuf_res_manager *man;
 };
 
 struct vmw_legacy_display;
@@ -382,6 +330,26 @@ struct vmw_vga_topology_state {
        uint32_t pos_y;
 };
 
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
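+ * @enabled:        Whether the otable is currently set up on the device.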
+ */
+struct vmw_otable {
+       unsigned long size;
+       struct vmw_mob *page_table;
+       bool enabled;
+};
+
+struct vmw_otable_batch {
+       unsigned num_otables;
+       struct vmw_otable *otables;
+       struct vmw_resource *context;
+       struct ttm_buffer_object *otable_bo;
+};
+
 struct vmw_private {
        struct ttm_bo_device bdev;
        struct ttm_bo_global_ref bo_global_ref;
@@ -417,6 +385,7 @@ struct vmw_private {
        bool has_mob;
        spinlock_t hw_lock;
        spinlock_t cap_lock;
+       bool has_dx;
 
        /*
         * VGA registers.
@@ -552,8 +521,7 @@ struct vmw_private {
        /*
         * Guest Backed stuff
         */
-       struct ttm_buffer_object *otable_bo;
-       struct vmw_otable *otables;
+       struct vmw_otable_batch otable_batch;
 
        struct vmw_cmdbuf_man *cman;
 };
@@ -685,6 +653,7 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  uint32_t *inout_id,
                                  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
+                                  bool switch_backup,
                                   struct vmw_dma_buffer *new_backup,
                                   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
@@ -742,7 +711,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
 extern void vmw_fifo_release(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo);
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
                               uint32_t *seqno);
 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
@@ -828,14 +800,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+                            struct drm_file *file_priv, size_t size);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
                               struct vmw_private *dev_priv,
                               void __user *user_commands,
                               void *kernel_commands,
                               uint32_t command_size,
                               uint64_t throttle_us,
+                              uint32_t dx_context_handle,
                               struct drm_vmw_fence_rep __user
                               *user_fence_rep,
                               struct vmw_fence_obj **out_fence);
@@ -960,6 +933,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
                     uint32_t handle);
 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
 extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
 
 /**
  * Overlay control - vmwgfx_overlay.c
@@ -1016,27 +990,28 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
 
 extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
-
 extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+                                            struct drm_file *file_priv);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-                                  const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
-                                  struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 extern struct vmw_cmdbuf_res_manager *
 vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+                                               SVGACOTableType cotable_type);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+                                         bool readback);
+
 /*
  * Surface management - vmwgfx_surface.c
  */
@@ -1066,6 +1041,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
                               bool for_scanout,
                               uint32_t num_mip_levels,
                               uint32_t multisample_count,
+                              uint32_t array_size,
                               struct drm_vmw_size size,
                               struct vmw_surface **srf_out);
 
@@ -1085,12 +1061,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
                                 SVGA3dShaderType shader_type,
                                 size_t size,
                                 struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-                                   u32 user_key, SVGA3dShaderType shader_type,
-                                   struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+                            u32 user_key, SVGA3dShaderType shader_type,
+                            struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+                            struct vmw_resource *ctx,
+                            u32 user_key,
+                            SVGA3dShaderType shader_type,
+                            struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+                                            struct list_head *list,
+                                            bool readback);
+
 extern struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-                        u32 user_key, SVGA3dShaderType shader_type);
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+                 u32 user_key, SVGA3dShaderType shader_type);
 
 /*
  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1114,8 +1099,20 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
                                 enum vmw_cmdbuf_res_type res_type,
                                 u32 user_key,
-                                struct list_head *list);
+                                struct list_head *list,
+                                struct vmw_resource **res);
 
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+                                             struct vmw_resource *ctx,
+                                             u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *res,
+                                    struct list_head *head);
 
 /*
  * Command buffer management - vmwgfx_cmdbuf.c
index 847264f8a33a6859c09d109f9b9e774e223fe772..401305bbb8103208ba08554b45b934b735fc0295 100644 (file)
@@ -29,6 +29,8 @@
 #include "vmwgfx_reg.h"
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_HT_ORDER 12
 
@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: Set when @switching_backup is true on the first buffer
+ * reference, so resource reservation does not need to allocate a backup
+ * buffer for the resource.
  */
 struct vmw_resource_val_node {
        struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
-       bool first_usage;
-       bool no_buffer_needed;
+       u32 first_usage : 1;
+       u32 switching_backup : 1;
+       u32 no_buffer_needed : 1;
 };
 
 /**
@@ -92,6 +98,10 @@ struct vmw_cmd_entry {
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}
 
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+                                       struct vmw_sw_context *sw_context,
+                                       struct vmw_resource *ctx);
+
 /**
  * vmw_resource_list_unreserve - unreserve resources previously reserved for
  * command submission.
@@ -99,15 +109,16 @@ struct vmw_cmd_entry {
  * @list_head: list of resources to unreserve.
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct list_head *list,
+static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context,
+                                       struct list_head *list,
                                        bool backoff)
 {
        struct vmw_resource_val_node *val;
 
        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
-               struct vmw_dma_buffer *new_backup =
-                       backoff ? NULL : val->new_backup;
+               bool switch_backup =
+                       (backoff) ? false : val->switching_backup;
 
                /*
                 * Transfer staged context bindings to the
@@ -115,18 +126,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
-                               vmw_context_binding_state_transfer
-                                       (val->res, val->staged_bindings);
+                               vmw_binding_state_commit
+                                       (vmw_context_binding_state(val->res),
+                                        val->staged_bindings);
                        }
-                       kfree(val->staged_bindings);
+
+                       if (val->staged_bindings != sw_context->staged_bindings)
+                               vmw_binding_state_free(val->staged_bindings);
+                       else
+                               sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
-               vmw_resource_unreserve(res, new_backup,
-                       val->new_backup_offset);
+               vmw_resource_unreserve(res, switch_backup, val->new_backup,
+                                      val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
 }
 
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  struct vmw_resource_val_node *node)
+{
+       int ret;
+
+       ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+       if (unlikely(ret != 0))
+               goto out_err;
+
+       if (!sw_context->staged_bindings) {
+               sw_context->staged_bindings =
+                       vmw_binding_state_alloc(dev_priv);
+               if (IS_ERR(sw_context->staged_bindings)) {
+                       DRM_ERROR("Failed to allocate context binding "
+                                 "information.\n");
+                       ret = PTR_ERR(sw_context->staged_bindings);
+                       sw_context->staged_bindings = NULL;
+                       goto out_err;
+               }
+       }
+
+       if (sw_context->staged_bindings_inuse) {
+               node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+               if (IS_ERR(node->staged_bindings)) {
+                       DRM_ERROR("Failed to allocate context binding "
+                                 "information.\n");
+                       ret = PTR_ERR(node->staged_bindings);
+                       node->staged_bindings = NULL;
+                       goto out_err;
+               }
+       } else {
+               node->staged_bindings = sw_context->staged_bindings;
+               sw_context->staged_bindings_inuse = true;
+       }
+
+       return 0;
+out_err:
+       return ret;
+}
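The allocation strategy above is deliberate: one binding-state buffer is preallocated per software context and lent to the first context in a submission; only additional contexts in the same submission pay for a fresh allocation. A minimal userspace sketch of that pattern, with invented names:

#include <stdlib.h>

struct state { int dummy; };	/* stand-in for vmw_ctx_binding_state */

struct submission {
	struct state *shared;	/* preallocated once, reused each time */
	int shared_in_use;
};

static struct state *state_get(struct submission *s)
{
	if (!s->shared_in_use) {
		s->shared_in_use = 1;	/* fast path: first context */
		return s->shared;
	}
	/* slow path: a second context in the same submission */
	return calloc(1, sizeof(struct state));
}

int main(void)
{
	struct state preallocated = {0};
	struct submission s = { &preallocated, 0 };
	struct state *first = state_get(&s);	/* == &preallocated */
	struct state *second = state_get(&s);	/* freshly allocated */

	free(second);
	return first == &preallocated ? 0 : 1;
}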
 
 /**
  * vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +205,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
 {
+       struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;
@@ -169,14 +234,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                kfree(node);
                return ret;
        }
-       list_add_tail(&node->head, &sw_context->resource_list);
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
-
        if (unlikely(p_node != NULL))
                *p_node = node;
 
-       return 0;
+       if (!dev_priv->has_mob) {
+               list_add_tail(&node->head, &sw_context->resource_list);
+               return 0;
+       }
+
+       switch (vmw_res_type(res)) {
+       case vmw_res_context:
+       case vmw_res_dx_context:
+               list_add(&node->head, &sw_context->ctx_resource_list);
+               ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+               break;
+       case vmw_res_cotable:
+               list_add_tail(&node->head, &sw_context->ctx_resource_list);
+               break;
+       default:
+               list_add_tail(&node->head, &sw_context->resource_list);
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view, and the surface it points to, to the
+ * validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+                               struct vmw_resource *view)
+{
+       int ret;
+
+       /*
+        * First add the resource the view is pointing to, otherwise
+        * it may be swapped out when the view is validated.
+        */
+       ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+       if (ret)
+               return ret;
+
+       return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it
+ * points to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: The id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise returns 0 on success and a negative error
+ * code on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+                              enum vmw_view_type view_type, u32 id)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_resource *view;
+       int ret;
+
+       if (!ctx_node) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       view = vmw_view_lookup(sw_context->man, view_type, id);
+       if (IS_ERR(view))
+               return PTR_ERR(view);
+
+       ret = vmw_view_res_val_add(sw_context, view);
+       vmw_resource_unreference(&view);
+
+       return ret;
 }
 
 /**
@@ -195,19 +336,41 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_resource *ctx)
 {
        struct list_head *binding_list;
-       struct vmw_ctx_binding *entry;
+       struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
+       u32 i;
+
+       /* Add all cotables to the validation list. */
+       if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+               for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+                       res = vmw_context_cotable(ctx, i);
+                       if (IS_ERR(res))
+                               continue;
+
+                       ret = vmw_resource_val_add(sw_context, res, NULL);
+                       vmw_resource_unreference(&res);
+                       if (unlikely(ret != 0))
+                               return ret;
+               }
+       }
 
+       /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);
 
        list_for_each_entry(entry, binding_list, ctx_list) {
-               res = vmw_resource_reference_unless_doomed(entry->bi.res);
+               /* entry->res is not refcounted */
+               res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;
 
-               ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+               if (vmw_res_type(entry->res) == vmw_res_view)
+                       ret = vmw_view_res_val_add(sw_context, entry->res);
+               else
+                       ret = vmw_resource_val_add(sw_context, entry->res,
+                                                  NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
@@ -409,6 +572,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
+               struct vmw_dma_buffer *backup = res->backup;
 
                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
@@ -416,18 +580,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }
+
+               /* Check if the resource switched backup buffer */
+               if (backup && res->backup && (backup != res->backup)) {
+                       struct vmw_dma_buffer *vbo = res->backup;
+
+                       ret = vmw_bo_to_validate_list
+                               (sw_context, vbo,
+                                vmw_resource_needs_backup(res), NULL);
+                       if (ret) {
+                               ttm_bo_unreserve(&vbo->base);
+                               return ret;
+                       }
+               }
        }
        return 0;
 }
 
-
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +610,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
  */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
-                                enum vmw_res_type res_type,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
@@ -454,29 +628,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       if (res_type == vmw_res_context && dev_priv->has_mob &&
-           node->first_usage) {
-
-               /*
-                * Put contexts first on the list to be able to exit
-                * list traversal for contexts early.
-                */
-               list_del(&node->head);
-               list_add(&node->head, &sw_context->resource_list);
-
-               ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-               if (unlikely(ret != 0))
-                       return ret;
-               node->staged_bindings =
-                       kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-               if (node->staged_bindings == NULL) {
-                       DRM_ERROR("Failed to allocate context binding "
-                                 "information.\n");
-                       return -ENOMEM;
-               }
-               INIT_LIST_HEAD(&node->staged_bindings->list);
-       }
-
        if (p_val)
                *p_val = node;
 
@@ -554,7 +705,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
        rcache->res = res;
        rcache->handle = *id_loc;
 
-       ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+       ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;
@@ -589,7 +740,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
                if (unlikely(!val->staged_bindings))
                        break;
 
-               ret = vmw_context_rebind_all(val->res);
+               ret = vmw_binding_rebind_all
+                       (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
@@ -600,6 +752,69 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
        return 0;
 }
 
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+                                enum vmw_view_type view_type,
+                                enum vmw_ctx_binding_type binding_type,
+                                uint32 shader_slot,
+                                uint32 view_ids[], u32 num_views,
+                                u32 first_slot)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_cmdbuf_res_manager *man;
+       u32 i;
+       int ret;
+
+       if (!ctx_node) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       man = sw_context->man;
+       for (i = 0; i < num_views; ++i) {
+               struct vmw_ctx_bindinfo_view binding;
+               struct vmw_resource *view = NULL;
+
+               if (view_ids[i] != SVGA3D_INVALID_ID) {
+                       view = vmw_view_lookup(man, view_type, view_ids[i]);
+                       if (IS_ERR(view)) {
+                               DRM_ERROR("View not found.\n");
+                               return PTR_ERR(view);
+                       }
+
+                       ret = vmw_view_res_val_add(sw_context, view);
+                       if (ret) {
+                               DRM_ERROR("Could not add view to "
+                                         "validation list.\n");
+                               vmw_resource_unreference(&view);
+                               return ret;
+                       }
+               }
+               binding.bi.ctx = ctx_node->res;
+               binding.bi.res = view;
+               binding.bi.bt = binding_type;
+               binding.shader_slot = shader_slot;
+               binding.slot = first_slot + i;
+               vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                               shader_slot, binding.slot);
+               if (view)
+                       vmw_resource_unreference(&view);
+       }
+
+       return 0;
+}
+
 /**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
@@ -638,6 +853,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
 
+       if (cmd->body.type >= SVGA3D_RT_MAX) {
+               DRM_ERROR("Illegal render target type %u.\n",
+                         (unsigned) cmd->body.type);
+               return -EINVAL;
+       }
+
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
@@ -651,13 +872,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                return ret;
 
        if (dev_priv->has_mob) {
-               struct vmw_ctx_bindinfo bi;
+               struct vmw_ctx_bindinfo_view binding;
 
-               bi.ctx = ctx_node->res;
-               bi.res = res_node ? res_node->res : NULL;
-               bi.bt = vmw_ctx_binding_rt;
-               bi.i1.rt_type = cmd->body.type;
-               return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+               binding.bi.ctx = ctx_node->res;
+               binding.bi.res = res_node ? res_node->res : NULL;
+               binding.bi.bt = vmw_ctx_binding_rt;
+               binding.slot = cmd->body.type;
+               vmw_binding_add(ctx_node->staged_bindings,
+                               &binding.bi, 0, binding.slot);
        }
 
        return 0;
@@ -1364,6 +1586,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;
 
+               if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+                       DRM_ERROR("Illegal texture/sampler unit %u.\n",
+                                 (unsigned) cur_state->stage);
+                       return -EINVAL;
+               }
+
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &cur_state->value, &res_node);
@@ -1371,14 +1599,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                        return ret;
 
                if (dev_priv->has_mob) {
-                       struct vmw_ctx_bindinfo bi;
-
-                       bi.ctx = ctx_node->res;
-                       bi.res = res_node ? res_node->res : NULL;
-                       bi.bt = vmw_ctx_binding_tex;
-                       bi.i1.texture_stage = cur_state->stage;
-                       vmw_context_binding_add(ctx_node->staged_bindings,
-                                               &bi);
+                       struct vmw_ctx_bindinfo_tex binding;
+
+                       binding.bi.ctx = ctx_node->res;
+                       binding.bi.res = res_node ? res_node->res : NULL;
+                       binding.bi.bt = vmw_ctx_binding_tex;
+                       binding.texture_stage = cur_state->stage;
+                       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                                       0, binding.texture_stage);
                }
        }
 
@@ -1408,6 +1636,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        return ret;
 }
 
+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    struct vmw_resource_val_node *val_node,
+                                    uint32_t *buf_id,
+                                    unsigned long backup_offset)
+{
+       struct vmw_dma_buffer *dma_buf;
+       int ret;
+
+       ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+       if (ret)
+               return ret;
+
+       val_node->switching_backup = true;
+       if (val_node->first_usage)
+               val_node->no_buffer_needed = true;
+
+       vmw_dmabuf_unreference(&val_node->new_backup);
+       val_node->new_backup = dma_buf;
+       val_node->new_backup_offset = backup_offset;
+
+       return 0;
+}
+
+
 /**
  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
  *
@@ -1421,7 +1690,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
  * @backup_offset: Offset of backup into MOB.
  *
  * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
  */
 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
@@ -1432,27 +1702,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                 uint32_t *buf_id,
                                 unsigned long backup_offset)
 {
-       int ret;
-       struct vmw_dma_buffer *dma_buf;
        struct vmw_resource_val_node *val_node;
+       int ret;
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
                                converter, res_id, &val_node);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
-       if (unlikely(ret != 0))
+       if (ret)
                return ret;
 
-       if (val_node->first_usage)
-               val_node->no_buffer_needed = true;
-
-       vmw_dmabuf_unreference(&val_node->new_backup);
-       val_node->new_backup = dma_buf;
-       val_node->new_backup_offset = backup_offset;
-
-       return 0;
+       return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+                                        buf_id, backup_offset);
 }
 
 /**
@@ -1704,10 +1963,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
        if (unlikely(!dev_priv->has_mob))
                return 0;
 
-       ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
-                                      cmd->body.shid,
-                                      cmd->body.type,
-                                      &sw_context->staged_cmd_res);
+       ret = vmw_shader_remove(vmw_context_res_man(val->res),
+                               cmd->body.shid,
+                               cmd->body.type,
+                               &sw_context->staged_cmd_res);
        if (unlikely(ret != 0))
                return ret;
 
@@ -1735,13 +1994,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                SVGA3dCmdSetShader body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node, *res_node = NULL;
-       struct vmw_ctx_bindinfo bi;
+       struct vmw_ctx_bindinfo_shader binding;
        struct vmw_resource *res = NULL;
        int ret;
 
        cmd = container_of(header, struct vmw_set_shader_cmd,
                           header);
 
+       if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+               DRM_ERROR("Illegal shader type %u.\n",
+                         (unsigned) cmd->body.type);
+               return -EINVAL;
+       }
+
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
@@ -1752,14 +2017,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                return 0;
 
        if (cmd->body.shid != SVGA3D_INVALID_ID) {
-               res = vmw_compat_shader_lookup
-                       (vmw_context_res_man(ctx_node->res),
-                        cmd->body.shid,
-                        cmd->body.type);
+               res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+                                       cmd->body.shid,
+                                       cmd->body.type);
 
                if (!IS_ERR(res)) {
                        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
-                                                   vmw_res_shader,
                                                    &cmd->body.shid, res,
                                                    &res_node);
                        vmw_resource_unreference(&res);
@@ -1777,11 +2040,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                        return ret;
        }
 
-       bi.ctx = ctx_node->res;
-       bi.res = res_node ? res_node->res : NULL;
-       bi.bt = vmw_ctx_binding_shader;
-       bi.i1.shader_type = cmd->body.type;
-       return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+       binding.bi.ctx = ctx_node->res;
+       binding.bi.res = res_node ? res_node->res : NULL;
+       binding.bi.bt = vmw_ctx_binding_shader;
+       binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                       binding.shader_slot, 0);
+       return 0;
 }
 
 /**
@@ -1843,78 +2108,705 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
                                     cmd->body.offsetInBytes);
 }
 
-static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
-                               struct vmw_sw_context *sw_context,
-                               void *buf, uint32_t *size)
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+                                     struct vmw_sw_context *sw_context,
+                                     SVGA3dCmdHeader *header)
 {
-       uint32_t size_remaining = *size;
-       uint32_t cmd_id;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetSingleConstantBuffer body;
+       } *cmd;
+       struct vmw_resource_val_node *res_node = NULL;
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_bindinfo_cb binding;
+       int ret;
 
-       cmd_id = ((uint32_t *)buf)[0];
-       switch (cmd_id) {
-       case SVGA_CMD_UPDATE:
-               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
-               break;
-       case SVGA_CMD_DEFINE_GMRFB:
-               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
-               break;
-       case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
-               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
-               break;
-       case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
-               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
-               break;
-       default:
-               DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+       if (unlikely(ctx_node == NULL)) {
+               DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }
 
-       if (*size > size_remaining) {
-               DRM_ERROR("Invalid SVGA command (size mismatch):"
-                         " %u.\n", cmd_id);
-               return -EINVAL;
-       }
+       cmd = container_of(header, typeof(*cmd), header);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.sid, &res_node);
+       if (unlikely(ret != 0))
+               return ret;
 
-       if (unlikely(!sw_context->kernel)) {
-               DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
-               return -EPERM;
+       binding.bi.ctx = ctx_node->res;
+       binding.bi.res = res_node ? res_node->res : NULL;
+       binding.bi.bt = vmw_ctx_binding_cb;
+       binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+       binding.offset = cmd->body.offsetInBytes;
+       binding.size = cmd->body.sizeInBytes;
+       binding.slot = cmd->body.slot;
+
+       if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+           binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+               DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+                         (unsigned) cmd->body.type,
+                         (unsigned) binding.slot);
+               return -EINVAL;
        }
 
-       if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
-               return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                       binding.shader_slot, binding.slot);
 
        return 0;
 }
 
-static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
-                   false, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
-                   false, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
-                   false, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
-                   false, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
-                   &vmw_cmd_set_render_target_check, true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
-                   true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
-                   true, false, false),
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetShaderResources body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+       u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+               sizeof(SVGA3dShaderResourceViewId);
+
+       if ((u64) cmd->body.startView + (u64) num_sr_view >
+           (u64) SVGA3D_DX_MAX_SRVIEWS ||
+           cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+               DRM_ERROR("Invalid shader binding.\n");
+               return -EINVAL;
+       }
+
+       return vmw_view_bindings_add(sw_context, vmw_view_sr,
+                                    vmw_ctx_binding_sr,
+                                    cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+                                    (void *) &cmd[1], num_sr_view,
+                                    cmd->body.startView);
+}
+
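The (u64) casts in the range check above are load-bearing: startView and the derived view count are 32-bit, so a naive 32-bit sum could wrap and slip past the limit. A standalone demonstration of the widened comparison:

#include <stdint.h>
#include <stdio.h>

static int in_range(uint32_t start, uint32_t count, uint32_t max)
{
	/* Widen first; in 32 bits, 0xffffffff + 2 would wrap to 1. */
	return (uint64_t)start + (uint64_t)count <= (uint64_t)max;
}

int main(void)
{
	printf("%d\n", in_range(16, 4, 64));		/* 1: accepted */
	printf("%d\n", in_range(0xffffffffu, 2, 64));	/* 0: rejected */
	return 0;
}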
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetShader body;
+       } *cmd;
+       struct vmw_resource *res = NULL;
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_bindinfo_shader binding;
+       int ret = 0;
+
+       if (unlikely(ctx_node == NULL)) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       cmd = container_of(header, typeof(*cmd), header);
+
+       if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+               DRM_ERROR("Illegal shader type %u.\n",
+                         (unsigned) cmd->body.type);
+               return -EINVAL;
+       }
+
+       if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+               res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+               if (IS_ERR(res)) {
+                       DRM_ERROR("Could not find shader for binding.\n");
+                       return PTR_ERR(res);
+               }
+
+               ret = vmw_resource_val_add(sw_context, res, NULL);
+               if (ret)
+                       goto out_unref;
+       }
+
+       binding.bi.ctx = ctx_node->res;
+       binding.bi.res = res;
+       binding.bi.bt = vmw_ctx_binding_dx_shader;
+       binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                       binding.shader_slot, 0);
+out_unref:
+       if (res)
+               vmw_resource_unreference(&res);
+
+       return ret;
+}
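+
+/*
+ * Note: a shaderId of SVGA3D_INVALID_ID is a legal unbind request; res
+ * then stays NULL and the staged binding records the slot as empty so
+ * the binding manager can scrub it when the bindings are committed.
+ */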
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validate an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_bindinfo_vb binding;
+       struct vmw_resource_val_node *res_node;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetVertexBuffers body;
+               SVGA3dVertexBuffer buf[];
+       } *cmd;
+       int i, ret, num;
+
+       if (unlikely(ctx_node == NULL)) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       cmd = container_of(header, typeof(*cmd), header);
+       num = (cmd->header.size - sizeof(cmd->body)) /
+               sizeof(SVGA3dVertexBuffer);
+       if ((u64)num + (u64)cmd->body.startBuffer >
+           (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+               DRM_ERROR("Invalid number of vertex buffers.\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num; i++) {
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                       user_surface_converter,
+                                       &cmd->buf[i].sid, &res_node);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               binding.bi.ctx = ctx_node->res;
+               binding.bi.bt = vmw_ctx_binding_vb;
+               binding.bi.res = ((res_node) ? res_node->res : NULL);
+               binding.offset = cmd->buf[i].offset;
+               binding.stride = cmd->buf[i].stride;
+               binding.slot = i + cmd->body.startBuffer;
+
+               vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                               0, binding.slot);
+       }
+
+       return 0;
+}
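+
+/*
+ * Same trailing-array pattern as the shader-resource command above:
+ * buf[] is a flexible array member, so the per-slot loop can index the
+ * SVGA3dVertexBuffer entries that follow the fixed command body.
+ */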
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+                                      struct vmw_sw_context *sw_context,
+                                      SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_bindinfo_ib binding;
+       struct vmw_resource_val_node *res_node;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetIndexBuffer body;
+       } *cmd;
+       int ret;
+
+       if (unlikely(ctx_node == NULL)) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       cmd = container_of(header, typeof(*cmd), header);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.sid, &res_node);
+       if (unlikely(ret != 0))
+               return ret;
+
+       binding.bi.ctx = ctx_node->res;
+       binding.bi.res = ((res_node) ? res_node->res : NULL);
+       binding.bi.bt = vmw_ctx_binding_ib;
+       binding.offset = cmd->body.offset;
+       binding.format = cmd->body.format;
+
+       vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+       return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+                                       struct vmw_sw_context *sw_context,
+                                       SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXSetRenderTargets body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+       int ret;
+       u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+               sizeof(SVGA3dRenderTargetViewId);
+
+       if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+               DRM_ERROR("Invalid DX Rendertarget binding.\n");
+               return -EINVAL;
+       }
+
+       ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+                                   vmw_ctx_binding_ds, 0,
+                                   &cmd->body.depthStencilViewId, 1, 0);
+       if (ret)
+               return ret;
+
+       return vmw_view_bindings_add(sw_context, vmw_view_rt,
+                                    vmw_ctx_binding_dx_rt, 0,
+                                    (void *)&cmd[1], num_rt_view, 0);
+}
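+
+/*
+ * Layout note: SVGA3dCmdDXSetRenderTargets carries the depth/stencil
+ * view id in its fixed body while the render-target view ids trail the
+ * body as a variable-length array, hence the two separate
+ * vmw_view_bindings_add() passes above.
+ */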
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+                                             struct vmw_sw_context *sw_context,
+                                             SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXClearRenderTargetView body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+
+       return vmw_view_id_val_add(sw_context, vmw_view_rt,
+                                  cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+                                             struct vmw_sw_context *sw_context,
+                                             SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXClearDepthStencilView body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+
+       return vmw_view_id_val_add(sw_context, vmw_view_ds,
+                                  cmd->body.depthStencilViewId);
+}
+
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_resource_val_node *srf_node;
+       struct vmw_resource *res;
+       enum vmw_view_type view_type;
+       int ret;
+       /*
+        * This is based on the fact that all affected define commands have
+        * the same initial command body layout.
+        */
+       struct {
+               SVGA3dCmdHeader header;
+               uint32 defined_id;
+               uint32 sid;
+       } *cmd;
+
+       if (unlikely(ctx_node == NULL)) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       view_type = vmw_view_cmd_to_type(header->id);
+       cmd = container_of(header, typeof(*cmd), header);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->sid, &srf_node);
+       if (unlikely(ret != 0))
+               return ret;
+
+       res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+       ret = vmw_cotable_notify(res, cmd->defined_id);
+       vmw_resource_unreference(&res);
+       if (unlikely(ret != 0))
+               return ret;
+
+       return vmw_view_add(sw_context->man,
+                           ctx_node->res,
+                           srf_node->res,
+                           view_type,
+                           cmd->defined_id,
+                           header,
+                           header->size + sizeof(*header),
+                           &sw_context->staged_cmd_res);
+}
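+
+/*
+ * The shared { defined_id, sid } prologue is what allows a single
+ * handler to validate the shader-resource, rendertarget and
+ * depth/stencil view define commands; the full command, including its
+ * type-specific tail, is stashed verbatim with the staged command
+ * resource.
+ */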
+
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+                               struct vmw_sw_context *sw_context,
+                               SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_resource *res;
+       /*
+        * This is based on the fact that all affected define commands have
+        * the same initial command body layout.
+        */
+       struct {
+               SVGA3dCmdHeader header;
+               uint32 defined_id;
+       } *cmd;
+       enum vmw_so_type so_type;
+       int ret;
+
+       if (unlikely(ctx_node == NULL)) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       so_type = vmw_so_cmd_to_type(header->id);
+       res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+       cmd = container_of(header, typeof(*cmd), header);
+       ret = vmw_cotable_notify(res, cmd->defined_id);
+       vmw_resource_unreference(&res);
+
+       return ret;
+}
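+
+/*
+ * "so" here means state object, not just streamoutput: element layout,
+ * blend, depth/stencil, rasterizer and sampler state defines all funnel
+ * through this handler and differ only in the cotable picked via
+ * vmw_so_cmd_to_type().
+ */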
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+                                       struct vmw_sw_context *sw_context,
+                                       SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               union {
+                       SVGA3dCmdDXReadbackSubResource r_body;
+                       SVGA3dCmdDXInvalidateSubResource i_body;
+                       SVGA3dCmdDXUpdateSubResource u_body;
+                       SVGA3dSurfaceId sid;
+               };
+       } *cmd;
+
+       BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+                    offsetof(typeof(*cmd), sid));
+       BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+                    offsetof(typeof(*cmd), sid));
+       BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+                    offsetof(typeof(*cmd), sid));
+
+       cmd = container_of(header, typeof(*cmd), header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->sid, NULL);
+}
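+
+/*
+ * The anonymous union and the BUILD_BUG_ON()s encode a single
+ * assumption: all three subresource command bodies start with the
+ * surface id, so one handler can read cmd->sid no matter which of the
+ * commands is being validated. A body whose sid moves breaks the build
+ * instead of silently breaking validation.
+ */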
+
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+                               struct vmw_sw_context *sw_context,
+                               SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+       if (unlikely(ctx_node == NULL)) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - Validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct {
+               SVGA3dCmdHeader header;
+               union vmw_view_destroy body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+       enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+       struct vmw_resource *view;
+       int ret;
+
+       if (!ctx_node) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       ret = vmw_view_remove(sw_context->man,
+                             cmd->body.view_id, view_type,
+                             &sw_context->staged_cmd_res,
+                             &view);
+       if (ret || !view)
+               return ret;
+
+       /*
+        * Add view to the validate list iff it was not created using this
+        * command batch.
+        */
+       return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+                                   struct vmw_sw_context *sw_context,
+                                   SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_resource *res;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXDefineShader body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+       int ret;
+
+       if (!ctx_node) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+       ret = vmw_cotable_notify(res, cmd->body.shaderId);
+       vmw_resource_unreference(&res);
+       if (ret)
+               return ret;
+
+       return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+                                cmd->body.shaderId, cmd->body.type,
+                                &sw_context->staged_cmd_res);
+}
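+
+/*
+ * vmw_cotable_notify() informs the context's shader cotable of the
+ * highest id about to be used, giving the cotable a chance to grow
+ * before the define command reaches the device.
+ */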
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXDestroyShader body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+       int ret;
+
+       if (!ctx_node) {
+               DRM_ERROR("DX Context not set.\n");
+               return -EINVAL;
+       }
+
+       ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+                               &sw_context->staged_cmd_res);
+       if (ret)
+               DRM_ERROR("Could not find shader to remove.\n");
+
+       return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+       struct vmw_resource_val_node *ctx_node;
+       struct vmw_resource_val_node *res_node;
+       struct vmw_resource *res;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXBindShader body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+       int ret;
+
+       if (cmd->body.cid != SVGA3D_INVALID_ID) {
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                       user_context_converter,
+                                       &cmd->body.cid, &ctx_node);
+               if (ret)
+                       return ret;
+       } else {
+               ctx_node = sw_context->dx_ctx_node;
+               if (!ctx_node) {
+                       DRM_ERROR("DX Context not set.\n");
+                       return -EINVAL;
+               }
+       }
+
+       res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+                               cmd->body.shid, 0);
+       if (IS_ERR(res)) {
+               DRM_ERROR("Could not find shader to bind.\n");
+               return PTR_ERR(res);
+       }
+
+       ret = vmw_resource_val_add(sw_context, res, &res_node);
+       if (ret) {
+               DRM_ERROR("Error creating resource validation node.\n");
+               goto out_unref;
+       }
+
+       ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+                                       &cmd->body.mobid,
+                                       cmd->body.offsetInBytes);
+out_unref:
+       vmw_resource_unreference(&res);
+
+       return ret;
+}
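+
+/*
+ * Unlike SVGA_3D_CMD_DX_SET_SHADER, bind does not touch pipeline state;
+ * it attaches a backing MOB to an already defined DX shader. A cid of
+ * SVGA3D_INVALID_ID means "the DX context of this submission", which is
+ * why the ctx_node fallback above only fails when no DX context was
+ * supplied with the execbuf ioctl.
+ */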
+
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+                               struct vmw_sw_context *sw_context,
+                               void *buf, uint32_t *size)
+{
+       uint32_t size_remaining = *size;
+       uint32_t cmd_id;
+
+       cmd_id = ((uint32_t *)buf)[0];
+       switch (cmd_id) {
+       case SVGA_CMD_UPDATE:
+               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+               break;
+       case SVGA_CMD_DEFINE_GMRFB:
+               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+               break;
+       case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+               break;
+       case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+               *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
+               break;
+       default:
+               DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+               return -EINVAL;
+       }
+
+       if (*size > size_remaining) {
+               DRM_ERROR("Invalid SVGA command (size mismatch):"
+                         " %u.\n", cmd_id);
+               return -EINVAL;
+       }
+
+       if (unlikely(!sw_context->kernel)) {
+               DRM_ERROR("Kernel-only SVGA command: %u.\n", cmd_id);
+               return -EPERM;
+       }
+
+       if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+               return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+       return 0;
+}
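+
+/*
+ * SVGA 2D commands are framed as a 32-bit command id immediately
+ * followed by a fixed-size body, roughly:
+ *
+ *     uint32 cmd_id;           // e.g. SVGA_CMD_UPDATE
+ *     SVGAFifoCmdUpdate body;  // x, y, width, height
+ *
+ * which is why each case above computes *size as sizeof(uint32_t) plus
+ * the body size before checking it against the remaining buffer.
+ */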
+
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
+                   &vmw_cmd_set_render_target_check, true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+                   true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
@@ -2050,7 +2942,136 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
-                   true, false, true)
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+                   false, false, true),
+
+       /*
+        * DX commands
+        */
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+                   &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+                   &vmw_cmd_dx_set_shader_res, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+                   &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+                   &vmw_cmd_dx_set_index_buffer, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+                   &vmw_cmd_dx_set_rendertargets, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, &vmw_cmd_dx_cid_check,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+                   &vmw_cmd_dx_cid_check,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+                   &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+                   &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+                   &vmw_cmd_dx_check_subresource, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+                   &vmw_cmd_dx_check_subresource, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+                   &vmw_cmd_dx_check_subresource, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+                   &vmw_cmd_dx_view_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+                   &vmw_cmd_dx_view_remove, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+                   &vmw_cmd_dx_view_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+                   &vmw_cmd_dx_view_remove, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+                   &vmw_cmd_dx_view_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+                   &vmw_cmd_dx_view_remove, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+                   &vmw_cmd_dx_so_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+                   &vmw_cmd_dx_so_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+                   &vmw_cmd_dx_so_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+                   &vmw_cmd_dx_so_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+                   &vmw_cmd_dx_so_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+                   &vmw_cmd_dx_define_shader, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+                   &vmw_cmd_dx_destroy_shader, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+                   &vmw_cmd_dx_bind_shader, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+                   &vmw_cmd_dx_so_define, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_invalid,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+                   &vmw_cmd_dx_cid_check, true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2183,7 +3204,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
  *
  * @list: The resource list.
  */
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+                                         struct list_head *list)
 {
        struct vmw_resource_val_node *val, *val_next;
 
@@ -2194,8 +3216,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
        list_for_each_entry_safe(val, val_next, list, head) {
                list_del_init(&val->head);
                vmw_resource_unreference(&val->res);
-               if (unlikely(val->staged_bindings))
-                       kfree(val->staged_bindings);
+
+               if (val->staged_bindings) {
+                       if (val->staged_bindings != sw_context->staged_bindings)
+                               vmw_binding_state_free(val->staged_bindings);
+                       else
+                               sw_context->staged_bindings_inuse = false;
+                       val->staged_bindings = NULL;
+               }
+
                kfree(val);
        }
 }
@@ -2431,8 +3460,13 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
                                   u32 command_size,
                                   struct vmw_sw_context *sw_context)
 {
-       void *cmd = vmw_fifo_reserve(dev_priv, command_size);
+       void *cmd;
 
+       if (sw_context->dx_ctx_node)
+               cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+                                         sw_context->dx_ctx_node->res->id);
+       else
+               cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (!cmd) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                return -ENOMEM;
@@ -2464,8 +3498,10 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
                                     u32 command_size,
                                     struct vmw_sw_context *sw_context)
 {
+       u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+                 SVGA3D_INVALID_ID);
        void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
-                                      SVGA3D_INVALID_ID, false, header);
+                                      id, false, header);
 
        vmw_apply_relocations(sw_context);
        vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
@@ -2535,12 +3571,44 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
        return kernel_commands;
 }
 
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  uint32_t handle)
+{
+       struct vmw_resource_val_node *ctx_node;
+       struct vmw_resource *res;
+       int ret;
+
+       if (handle == SVGA3D_INVALID_ID)
+               return 0;
+
+       ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+                                             handle, user_context_converter,
+                                             &res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not find or use DX context 0x%08x.\n",
+                         (unsigned) handle);
+               return ret;
+       }
+
+       ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+       if (unlikely(ret != 0))
+               goto out_err;
+
+       sw_context->dx_ctx_node = ctx_node;
+       sw_context->man = vmw_context_res_man(res);
+out_err:
+       vmw_resource_unreference(&res);
+       return ret;
+}
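+
+/*
+ * The context reference taken by the lookup is dropped again before
+ * returning; the validation node added above holds its own reference
+ * for the remainder of the submission.
+ */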
+
 int vmw_execbuf_process(struct drm_file *file_priv,
                        struct vmw_private *dev_priv,
                        void __user *user_commands,
                        void *kernel_commands,
                        uint32_t command_size,
                        uint64_t throttle_us,
+                       uint32_t dx_context_handle,
                        struct drm_vmw_fence_rep __user *user_fence_rep,
                        struct vmw_fence_obj **out_fence)
 {
@@ -2596,12 +3664,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;
        INIT_LIST_HEAD(&sw_context->resource_list);
+       INIT_LIST_HEAD(&sw_context->ctx_resource_list);
        sw_context->cur_query_bo = dev_priv->pinned_bo;
        sw_context->last_query_ctx = NULL;
        sw_context->needs_post_query_barrier = false;
+       sw_context->dx_ctx_node = NULL;
        memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
        INIT_LIST_HEAD(&sw_context->validate_nodes);
        INIT_LIST_HEAD(&sw_context->res_relocations);
+       if (sw_context->staged_bindings)
+               vmw_binding_state_reset(sw_context->staged_bindings);
+
        if (!sw_context->res_ht_initialized) {
                ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
                if (unlikely(ret != 0))
@@ -2610,11 +3683,20 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        }
        INIT_LIST_HEAD(&sw_context->staged_cmd_res);
        INIT_LIST_HEAD(&resource_list);
+       ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+       if (unlikely(ret != 0)) {
+               list_splice_init(&sw_context->ctx_resource_list,
+                                &sw_context->resource_list);
+               goto out_err_nores;
+       }
+
        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);
        if (unlikely(ret != 0))
                goto out_err_nores;
 
+       list_splice_init(&sw_context->ctx_resource_list,
+                        &sw_context->resource_list);
        ret = vmw_resources_reserve(sw_context);
        if (unlikely(ret != 0))
                goto out_err_nores;
@@ -2622,7 +3704,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
                                     true, NULL);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
@@ -2652,8 +3734,9 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                                                sw_context);
                header = NULL;
        }
+       mutex_unlock(&dev_priv->binding_mutex);
        if (ret)
-               goto out_unlock_binding;
+               goto out_err;
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2668,8 +3751,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");
 
-       vmw_resource_list_unreserve(&sw_context->resource_list, false);
-       mutex_unlock(&dev_priv->binding_mutex);
+       vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
+                                   false);
 
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
@@ -2698,7 +3781,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         * Unreference resources outside of the cmdbuf_mutex to
         * avoid deadlocks in resource destruction paths.
         */
-       vmw_resource_list_unreference(&resource_list);
+       vmw_resource_list_unreference(sw_context, &resource_list);
 
        return 0;
 
@@ -2707,7 +3790,8 @@ out_unlock_binding:
 out_err:
        ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
 out_err_nores:
-       vmw_resource_list_unreserve(&sw_context->resource_list, true);
+       vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
+                                   true);
        vmw_resource_relocations_free(&sw_context->res_relocations);
        vmw_free_relocations(sw_context);
        vmw_clear_validations(sw_context);
@@ -2725,7 +3809,7 @@ out_unlock:
         * Unreference resources outside of the cmdbuf_mutex to
         * avoid deadlocks in resource destruction paths.
         */
-       vmw_resource_list_unreference(&resource_list);
+       vmw_resource_list_unreference(sw_context, &resource_list);
        if (unlikely(error_resource != NULL))
                vmw_resource_unreference(&error_resource);
 out_free_header:
@@ -2877,36 +3961,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+                     struct drm_file *file_priv, size_t size)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
-       struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+       struct drm_vmw_execbuf_arg arg;
        int ret;
+       static const size_t copy_offset[] = {
+               offsetof(struct drm_vmw_execbuf_arg, context_handle),
+               sizeof(struct drm_vmw_execbuf_arg)};
+
+       if (unlikely(size < copy_offset[0])) {
+               DRM_ERROR("Invalid command size, ioctl %d\n",
+                         DRM_VMW_EXECBUF);
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+               return -EFAULT;
 
        /*
-        * This will allow us to extend the ioctl argument while
+        * The ioctl argument has been extended while
         * maintaining backwards compatibility:
         * We take different code paths depending on the value of
-        * arg->version.
+        * arg.version.
         */
 
-       if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+       if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+                    arg.version == 0)) {
                DRM_ERROR("Incorrect execbuf version.\n");
-               DRM_ERROR("You're running outdated experimental "
-                         "vmwgfx user-space drivers.");
                return -EINVAL;
        }
 
+       if (arg.version > 1 &&
+           copy_from_user(&arg.context_handle,
+                          (void __user *) (data + copy_offset[0]),
+                          copy_offset[arg.version - 1] -
+                          copy_offset[0]) != 0)
+               return -EFAULT;
+
+       switch (arg.version) {
+       case 1:
+               arg.context_handle = (uint32_t) -1;
+               break;
+       case 2:
+               if (arg.pad64 != 0) {
+                       DRM_ERROR("Unused IOCTL data not set to zero.\n");
+                       return -EINVAL;
+               }
+               break;
+       default:
+               break;
+       }
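+
+       /*
+        * At this point arg is fully populated for any supported
+        * version: copy_offset[] marks where each argument revision
+        * ends, so a version-1 caller supplies only the fields up to
+        * context_handle and the kernel defaults the rest above.
+        */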
+
        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;
 
        ret = vmw_execbuf_process(file_priv, dev_priv,
-                                 (void __user *)(unsigned long)arg->commands,
-                                 NULL, arg->command_size, arg->throttle_us,
-                                 (void __user *)(unsigned long)arg->fence_rep,
+                                 (void __user *)(unsigned long)arg.commands,
+                                 NULL, arg.command_size, arg.throttle_us,
+                                 arg.context_handle,
+                                 (void __user *)(unsigned long)arg.fence_rep,
                                  NULL);
        ttm_read_unlock(&dev_priv->reservation_sem);
        if (unlikely(ret != 0))
index cb24936a18c1d6bf333156e618219703d2069bb1..3c876d4826c0f792e52543d9b26dc5d13e8a20ac 100644 (file)
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_placement.h>
 
+struct vmw_temp_set_context {
+       SVGA3dCmdHeader header;
+       SVGA3dCmdDXTempSetContext body;
+};
+
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -99,6 +104,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        uint32_t max;
        uint32_t min;
 
+       fifo->dx = false;
        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
        if (unlikely(fifo->static_buffer == NULL))
@@ -396,15 +402,20 @@ out_err:
        return NULL;
 }
 
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+                         int ctx_id)
 {
        void *ret;
 
        if (dev_priv->cman)
                ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
-                                        SVGA3D_INVALID_ID, false, NULL);
-       else
+                                        ctx_id, false, NULL);
+       else if (ctx_id == SVGA3D_INVALID_ID)
                ret = vmw_local_fifo_reserve(dev_priv, bytes);
+       else {
+               WARN(1, "Command buffer has not been allocated.\n");
+               ret = NULL;
+       }
        if (IS_ERR_OR_NULL(ret)) {
                DRM_ERROR("Fifo reserve failure of %u bytes.\n",
                          (unsigned) bytes);
@@ -466,6 +477,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
        uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
+       if (fifo_state->dx)
+               bytes += sizeof(struct vmw_temp_set_context);
+
+       fifo_state->dx = false;
        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);
 
@@ -518,7 +533,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
  * @dev_priv: Pointer to device private structure.
  * @bytes: Number of bytes to commit.
  */
-static void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
 {
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
@@ -706,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 
        return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
 }
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+       return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
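+
+/*
+ * The legacy entry point is now a thin wrapper: reserving with
+ * SVGA3D_INVALID_ID keeps the pre-DX behaviour, while DX callers use
+ * vmw_fifo_reserve_dx() so that vmw_local_fifo_commit() can include the
+ * extra sizeof(struct vmw_temp_set_context) in the committed size when
+ * fifo_state->dx is set.
+ */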
index 91efe9cdd822e95896202a27d237d253c0218589..dca7f7f41aab2b34779eb7d8de4023dec1acfdd8 100644 (file)
@@ -110,6 +110,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                param->value =
                        (dev_priv->active_display_unit == vmw_du_screen_target);
                break;
+       case DRM_VMW_PARAM_DX:
+               param->value = dev_priv->has_dx;
+               break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
@@ -193,8 +196,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                uint32_t *bounce32 = (uint32_t *) bounce;
 
                num = size / sizeof(uint32_t);
-               if (num > SVGA3D_DEVCAP_MAX)
-                       num = SVGA3D_DEVCAP_MAX;
+               if (num > SVGA3D_DEVCAP_DX)
+                       num = SVGA3D_DEVCAP_DX;
 
                spin_lock(&dev_priv->cap_lock);
                for (i = 0; i < num; ++i) {
index 34d04bf17dfaa4de973cd533e9aada265a738bdc..f961bb98cdaab9d1654f59ad21396f2578c4a9ad 100644 (file)
@@ -528,7 +528,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
                return -EINVAL;
        }
 
-       if (unlikely(format != surface->format)) {
+       /*
+        * For DX, surface format validation is done when surface->scanout
+        * is set.
+        */
+       if (!dev_priv->has_dx && format != surface->format) {
                DRM_ERROR("Invalid surface format for requested mode.\n");
                return -EINVAL;
        }
@@ -754,6 +758,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
                        true, /* can be a scanout buffer */
                        1, /* num of mip levels */
                        0,
+                       0,
                        content_base_size,
                        srf_out);
        if (ret) {
@@ -769,7 +774,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
        vmw_dmabuf_unreference(&res->backup);
        res->backup = vmw_dmabuf_reference(dmabuf_mob);
        res->backup_offset = 0;
-       vmw_resource_unreserve(res, NULL, 0);
+       vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 
        return 0;
@@ -1869,7 +1874,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 void vmw_kms_helper_resource_revert(struct vmw_resource *res)
 {
        vmw_kms_helper_buffer_revert(res->backup);
-       vmw_resource_unreserve(res, NULL, 0);
+       vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
 
@@ -1916,7 +1921,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 out_revert:
        vmw_kms_helper_buffer_revert(res->backup);
 out_unreserve:
-       vmw_resource_unreserve(res, NULL, 0);
+       vmw_resource_unreserve(res, false, NULL, 0);
 out_unlock:
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
        return ret;
@@ -1937,7 +1942,7 @@ void vmw_kms_helper_resource_finish(struct vmw_resource *res,
                vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
                                             out_fence, NULL);
 
-       vmw_resource_unreserve(res, NULL, 0);
+       vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
 
index 5b0287eba30d505fbb4f7b1dfd57af76d923bdd9..a8203a9e1050d0b41a50cdc3ea0867ad96e44e65 100644 (file)
@@ -67,9 +67,23 @@ struct vmw_mob {
  * @size:           Size of the table (page-aligned).
  * @page_table:     Pointer to a struct vmw_mob holding the page table.
  */
-struct vmw_otable {
-       unsigned long size;
-       struct vmw_mob *page_table;
+static const struct vmw_otable pre_dx_tables[] = {
+       {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+        NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+       {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+       {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+        NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+       {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
 };
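+
+/*
+ * Both arrays are indexed by SVGAOTableType, so entry order must match
+ * the enum (MOB, SURFACE, CONTEXT, SHADER, SCREENTARGET, then the DX
+ * context table in the DX variant); vmw_otable_batch_setup() walks them
+ * by that index when binding each table base.
+ */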
 
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
  */
 static int vmw_setup_otable_base(struct vmw_private *dev_priv,
                                 SVGAOTableType type,
+                                struct ttm_buffer_object *otable_bo,
                                 unsigned long offset,
                                 struct vmw_otable *otable)
 {
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 
        BUG_ON(otable->page_table != NULL);
 
-       vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+       vsgt = vmw_bo_sg_table(otable_bo);
        vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
        WARN_ON(!vmw_piter_next(&iter));
 
@@ -193,7 +208,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
                          "takedown.\n");
                return;
        }
+
        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
        cmd->header.size = sizeof(cmd->body);
@@ -218,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
        otable->page_table = NULL;
 }
 
-/*
- * vmw_otables_setup - Set up guest backed memory object tables
- *
- * @dev_priv:       Pointer to a device private structure
- *
- * Takes care of the device guest backed surface
- * initialization, by setting up the guest backed memory object tables.
- * Returns 0 on success and various error codes on failure. A succesful return
- * means the object tables can be taken down using the vmw_otables_takedown
- * function.
- */
-int vmw_otables_setup(struct vmw_private *dev_priv)
+
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+                                 struct vmw_otable_batch *batch)
 {
        unsigned long offset;
        unsigned long bo_size;
-       struct vmw_otable *otables;
+       struct vmw_otable *otables = batch->otables;
        SVGAOTableType i;
        int ret;
 
-       otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
-                         GFP_KERNEL);
-       if (unlikely(otables == NULL)) {
-               DRM_ERROR("Failed to allocate space for otable "
-                         "metadata.\n");
-               return -ENOMEM;
-       }
-
-       otables[SVGA_OTABLE_MOB].size =
-               VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
-       otables[SVGA_OTABLE_SURFACE].size =
-               VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
-       otables[SVGA_OTABLE_CONTEXT].size =
-               VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
-       otables[SVGA_OTABLE_SHADER].size =
-               VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
-       otables[SVGA_OTABLE_SCREENTARGET].size =
-               VMWGFX_NUM_GB_SCREEN_TARGET *
-               SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
-
        bo_size = 0;
-       for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+       for (i = 0; i < batch->num_otables; ++i) {
+               if (!otables[i].enabled)
+                       continue;
+
                otables[i].size =
                        (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
                bo_size += otables[i].size;
@@ -268,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, NULL,
-                           &dev_priv->otable_bo);
+                           &batch->otable_bo);
 
        if (unlikely(ret != 0))
                goto out_no_bo;
 
-       ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+       ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
        BUG_ON(ret != 0);
-       ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+       ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
        if (unlikely(ret != 0))
                goto out_unreserve;
-       ret = vmw_bo_map_dma(dev_priv->otable_bo);
+       ret = vmw_bo_map_dma(batch->otable_bo);
        if (unlikely(ret != 0))
                goto out_unreserve;
 
-       ttm_bo_unreserve(dev_priv->otable_bo);
+       ttm_bo_unreserve(batch->otable_bo);
 
        offset = 0;
-       for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
-               ret = vmw_setup_otable_base(dev_priv, i, offset,
+       for (i = 0; i < batch->num_otables; ++i) {
+               if (!batch->otables[i].enabled)
+                       continue;
+
+               ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+                                           offset,
                                            &otables[i]);
                if (unlikely(ret != 0))
                        goto out_no_setup;
                offset += otables[i].size;
        }
 
-       dev_priv->otables = otables;
        return 0;
 
 out_unreserve:
-       ttm_bo_unreserve(dev_priv->otable_bo);
+       ttm_bo_unreserve(batch->otable_bo);
 out_no_setup:
-       for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-               vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+       for (i = 0; i < batch->num_otables; ++i) {
+               if (batch->otables[i].enabled)
+                       vmw_takedown_otable_base(dev_priv, i,
+                                                &batch->otables[i]);
+       }
 
-       ttm_bo_unref(&dev_priv->otable_bo);
+       ttm_bo_unref(&batch->otable_bo);
 out_no_bo:
-       kfree(otables);
        return ret;
 }
 
-
 /*
- * vmw_otables_takedown - Take down guest backed memory object tables
+ * vmw_otables_setup - Set up guest backed memory object tables
  *
  * @dev_priv:       Pointer to a device private structure
  *
- * Take down the Guest Memory Object tables.
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
  */
-void vmw_otables_takedown(struct vmw_private *dev_priv)
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+       struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+       int ret;
+
+       if (dev_priv->has_dx) {
+               *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+               if (*otables == NULL)
+                       return -ENOMEM;
+
+               memcpy(*otables, dx_tables, sizeof(dx_tables));
+               dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+       } else {
+               *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+               if (*otables == NULL)
+                       return -ENOMEM;
+
+               memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
+               dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+       }
+
+       ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+       if (unlikely(ret != 0))
+               goto out_setup;
+
+       return 0;
+
+out_setup:
+       kfree(*otables);
+       return ret;
+}
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+                              struct vmw_otable_batch *batch)
 {
        SVGAOTableType i;
-       struct ttm_buffer_object *bo = dev_priv->otable_bo;
+       struct ttm_buffer_object *bo = batch->otable_bo;
        int ret;
 
-       for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-               vmw_takedown_otable_base(dev_priv, i,
-                                        &dev_priv->otables[i]);
+       for (i = 0; i < batch->num_otables; ++i)
+               if (batch->otables[i].enabled)
+                       vmw_takedown_otable_base(dev_priv, i,
+                                                &batch->otables[i]);
 
        ret = ttm_bo_reserve(bo, false, true, false, NULL);
        BUG_ON(ret != 0);
@@ -332,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv)
        vmw_fence_single_bo(bo, NULL);
        ttm_bo_unreserve(bo);
 
-       ttm_bo_unref(&dev_priv->otable_bo);
-       kfree(dev_priv->otables);
-       dev_priv->otables = NULL;
+       ttm_bo_unref(&batch->otable_bo);
 }
 
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+       vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+       kfree(dev_priv->otable_batch.otables);
+}
 
 /*
  * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
@@ -410,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                goto out_unreserve;
 
        ttm_bo_unreserve(mob->pt_bo);
-       
+
        return 0;
 
 out_unreserve:
index be2809aaa7cb98141a4055c32bb88cf88e950eb6..6186e859dab07c235b0b672e2eceb86a12f262bf 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
@@ -144,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
        }
 
        if (likely(res->hw_destroy != NULL)) {
-               res->hw_destroy(res);
                mutex_lock(&dev_priv->binding_mutex);
-               vmw_context_binding_res_list_kill(&res->binding_head);
+               vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
+               res->hw_destroy(res);
        }
 
        id = res->id;
@@ -1149,14 +1150,16 @@ out_bind_failed:
  * command submission.
  *
  * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @switch_backup:     Backup buffer has been switched.
  * @new_backup:        Pointer to new backup buffer if command submission
- *                     switched.
- * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *                     switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
  *
  * Currently unreserving a resource means putting it back on the device's
  * resource lru list, so that it can be evicted if necessary.
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
+                           bool switch_backup,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
 {
@@ -1165,19 +1168,22 @@ void vmw_resource_unreserve(struct vmw_resource *res,
        if (!list_empty(&res->lru_head))
                return;
 
-       if (new_backup && new_backup != res->backup) {
-
+       if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }
 
-               res->backup = vmw_dmabuf_reference(new_backup);
-               lockdep_assert_held(&new_backup->base.resv->lock.base);
-               list_add_tail(&res->mob_head, &new_backup->res_list);
+               if (new_backup) {
+                       res->backup = vmw_dmabuf_reference(new_backup);
+                       lockdep_assert_held(&new_backup->base.resv->lock.base);
+                       list_add_tail(&res->mob_head, &new_backup->res_list);
+               } else {
+                       res->backup = NULL;
+               }
        }
-       if (new_backup)
+       if (switch_backup)
                res->backup_offset = new_backup_offset;
 
        if (!res->func->may_evict || res->id == -1 || res->pin_count)
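Note (editor illustration, not from the patch): the new @switch_backup
argument makes the NULL-backup case expressible, giving three caller
patterns; new_bo and offset are assumed locals:

    vmw_resource_unreserve(res, false, NULL, 0);       /* keep current backup */
    vmw_resource_unreserve(res, true, new_bo, offset); /* switch to new_bo */
    vmw_resource_unreserve(res, true, NULL, 0);        /* drop the backup */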
@@ -1269,8 +1275,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
-               if (unlikely(ret != 0))
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed to allocate a backup buffer "
+                                 "of size %lu bytes.\n",
+                                 (unsigned long) res->backup_size);
                        return ret;
+               }
        }
 
        return 0;
@@ -1354,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;
 
-       if (likely(!res->func->may_evict))
+       if (!res->func->create)
                return 0;
 
        val_buf.bo = NULL;
@@ -1624,7 +1634,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
        res->pin_count++;
 
 out_no_validate:
-       vmw_resource_unreserve(res, NULL, 0UL);
+       vmw_resource_unreserve(res, false, NULL, 0UL);
 out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);
@@ -1660,8 +1670,18 @@ void vmw_resource_unpin(struct vmw_resource *res)
                ttm_bo_unreserve(&vbo->base);
        }
 
-       vmw_resource_unreserve(res, NULL, 0UL);
+       vmw_resource_unreserve(res, false, NULL, 0UL);
 
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
 }
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+       return res->func->res_type;
+}
index f3adeed2854c78ff487646cd5641484e8b103a1d..743e2adafed2c2923d1f5647074f788b83059b9a 100644 (file)
 
 #include "vmwgfx_drv.h"
 
+enum vmw_cmdbuf_res_state {
+       VMW_CMDBUF_RES_COMMITTED,
+       VMW_CMDBUF_RES_ADD,
+       VMW_CMDBUF_RES_DEL
+};
+
 /**
  * struct vmw_user_resource_conv - Identify a derived user-exported resource
  * type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
  * @bind:              Bind a hardware resource to persistent buffer storage.
  * @unbind:            Unbind a hardware resource from persistent
  *                     buffer storage.
+ * @commit_notify:     If the resource is a command buffer managed resource,
+ *                     callback to notify that a define or remove command
+ *                     has been committed to the device.
  */
-
 struct vmw_res_func {
        enum vmw_res_type res_type;
        bool needs_backup;
@@ -71,6 +79,8 @@ struct vmw_res_func {
        int (*unbind) (struct vmw_resource *res,
                       bool readback,
                       struct ttm_validate_buffer *val_buf);
+       void (*commit_notify)(struct vmw_resource *res,
+                             enum vmw_cmdbuf_res_state state);
 };
 
 int vmw_resource_alloc_id(struct vmw_resource *res);
index 11bc60c2771a517ca4848b55288f27fdbcb313d3..61403ebe3a1ef28fa73250685d4a90b7419bee6d 100644 (file)
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
        uint32_t size;
+       uint8_t num_input_sig;
+       uint8_t num_output_sig;
 };
 
 struct vmw_user_shader {
@@ -40,8 +43,18 @@ struct vmw_user_shader {
        struct vmw_shader shader;
 };
 
+struct vmw_dx_shader {
+       struct vmw_resource res;
+       struct vmw_resource *ctx;
+       struct vmw_resource *cotable;
+       u32 id;
+       bool committed;
+       struct list_head cotable_head;
+};
+
 static uint64_t vmw_user_shader_size;
 static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
 
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+                                       enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
+
 static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
        .base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = {
        .unbind = vmw_gb_shader_unbind
 };
 
+static const struct vmw_res_func vmw_dx_shader_func = {
+       .res_type = vmw_res_shader,
+       .needs_backup = true,
+       .may_evict = false,
+       .type_name = "dx shaders",
+       .backup_placement = &vmw_mob_placement,
+       .create = vmw_dx_shader_create,
+       /*
+        * The destroy callback is only called with a committed resource on
+        * context destroy, in which case we destroy the cotable anyway,
+        * so there's no need to destroy DX shaders separately.
+        */
+       .destroy = NULL,
+       .bind = vmw_dx_shader_bind,
+       .unbind = vmw_dx_shader_unbind,
+       .commit_notify = vmw_dx_shader_commit_notify,
+};
+
 /**
  * Shader management:
  */
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res)
        return container_of(res, struct vmw_shader, res);
 }
 
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+       return container_of(res, struct vmw_dx_shader, res);
+}
+
 static void vmw_hw_shader_destroy(struct vmw_resource *res)
 {
-       (void) vmw_gb_shader_destroy(res);
+       if (likely(res->func->destroy))
+               (void) res->func->destroy(res);
+       else
+               res->id = -1;
 }
 
 static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              uint32_t size,
                              uint64_t offset,
                              SVGA3dShaderType type,
+                             uint8_t num_input_sig,
+                             uint8_t num_output_sig,
                              struct vmw_dma_buffer *byte_code,
                              void (*res_free) (struct vmw_resource *res))
 {
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;
 
-       ret = vmw_resource_init(dev_priv, res, true,
-                               res_free, &vmw_gb_shader_func);
-
+       ret = vmw_resource_init(dev_priv, res, true, res_free,
+                               &vmw_gb_shader_func);
 
        if (unlikely(ret != 0)) {
                if (res_free)
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
        }
        shader->size = size;
        shader->type = type;
+       shader->num_input_sig = num_input_sig;
+       shader->num_output_sig = num_output_sig;
 
        vmw_resource_activate(res, vmw_hw_shader_destroy);
        return 0;
 }
 
+/*
+ * GB shader code:
+ */
+
 static int vmw_gb_shader_create(struct vmw_resource *res)
 {
        struct vmw_private *dev_priv = res->dev_priv;
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
                return 0;
 
        mutex_lock(&dev_priv->binding_mutex);
-       vmw_context_binding_res_list_scrub(&res->binding_head);
+       vmw_binding_res_list_scrub(&res->binding_head);
 
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
@@ -280,6 +346,321 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
        return 0;
 }
 
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+                                       enum vmw_cmdbuf_res_state state)
+{
+       struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       if (state == VMW_CMDBUF_RES_ADD) {
+               mutex_lock(&dev_priv->binding_mutex);
+               vmw_cotable_add_resource(shader->cotable,
+                                        &shader->cotable_head);
+               shader->committed = true;
+               res->id = shader->id;
+               mutex_unlock(&dev_priv->binding_mutex);
+       } else {
+               mutex_lock(&dev_priv->binding_mutex);
+               list_del_init(&shader->cotable_head);
+               shader->committed = false;
+               res->id = -1;
+               mutex_unlock(&dev_priv->binding_mutex);
+       }
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+       struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXBindShader body;
+       } *cmd;
+
+       if (!list_empty(&shader->cotable_head) || !shader->committed)
+               return 0;
+
+       cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+                                 shader->ctx->id);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = shader->ctx->id;
+       cmd->body.shid = shader->id;
+       cmd->body.mobid = res->backup->base.mem.start;
+       cmd->body.offsetInBytes = res->backup_offset;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+       return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+       int ret = 0;
+
+       WARN_ON_ONCE(!shader->committed);
+
+       if (!list_empty(&res->mob_head)) {
+               mutex_lock(&dev_priv->binding_mutex);
+               ret = vmw_dx_shader_unscrub(res);
+               mutex_unlock(&dev_priv->binding_mutex);
+       }
+
+       res->id = shader->id;
+       return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+                             struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct ttm_buffer_object *bo = val_buf->bo;
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+       mutex_lock(&dev_priv->binding_mutex);
+       vmw_dx_shader_unscrub(res);
+       mutex_unlock(&dev_priv->binding_mutex);
+
+       return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma_buffer to be reserved. The driver still considers the MOB bound;
+ * however, once the driver eventually decides to unbind the MOB, it no
+ * longer needs to access the context to do so.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+       struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXBindShader body;
+       } *cmd;
+
+       if (list_empty(&shader->cotable_head))
+               return 0;
+
+       WARN_ON_ONCE(!shader->committed);
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "scrubbing.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = shader->ctx->id;
+       cmd->body.shid = res->id;
+       cmd->body.mobid = SVGA3D_INVALID_ID;
+       cmd->body.offsetInBytes = 0;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       res->id = -1;
+       list_del_init(&shader->cotable_head);
+
+       return 0;
+}
+
+/**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+                               bool readback,
+                               struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_fence_obj *fence;
+       int ret;
+
+       BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+       mutex_lock(&dev_priv->binding_mutex);
+       ret = vmw_dx_shader_scrub(res);
+       mutex_unlock(&dev_priv->binding_mutex);
+
+       if (ret)
+               return ret;
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+       vmw_fence_single_bo(val_buf->bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+                                     struct list_head *list,
+                                     bool readback)
+{
+       struct vmw_dx_shader *entry, *next;
+
+       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+       list_for_each_entry_safe(entry, next, list, cotable_head) {
+               WARN_ON(vmw_dx_shader_scrub(&entry->res));
+               if (!readback)
+                       entry->committed = false;
+       }
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+       vmw_resource_unreference(&shader->cotable);
+       kfree(shader);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+                     struct vmw_resource *ctx,
+                     u32 user_key,
+                     SVGA3dShaderType shader_type,
+                     struct list_head *list)
+{
+       struct vmw_dx_shader *shader;
+       struct vmw_resource *res;
+       struct vmw_private *dev_priv = ctx->dev_priv;
+       int ret;
+
+       if (!vmw_shader_dx_size)
+               vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+       if (!vmw_shader_id_ok(user_key, shader_type))
+               return -EINVAL;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+                                  false, true);
+       if (ret) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for shader "
+                                 "creation.\n");
+               return ret;
+       }
+
+       shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+       if (!shader) {
+               ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+               return -ENOMEM;
+       }
+
+       res = &shader->res;
+       shader->ctx = ctx;
+       shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+       shader->id = user_key;
+       shader->committed = false;
+       INIT_LIST_HEAD(&shader->cotable_head);
+       ret = vmw_resource_init(dev_priv, res, true,
+                               vmw_dx_shader_res_free, &vmw_dx_shader_func);
+       if (ret)
+               goto out_resource_init;
+
+       /*
+        * The user_key name-space is not per shader type for DX shaders,
+        * so when hashing, use a single zero shader type.
+        */
+       ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+                                vmw_shader_key(user_key, 0),
+                                res, list);
+       if (ret)
+               goto out_resource_init;
+
+       res->id = shader->id;
+       vmw_resource_activate(res, vmw_hw_shader_destroy);
+
+out_resource_init:
+       vmw_resource_unreference(&res);
+
+       return ret;
+}
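Note (editor illustration only): a hypothetical execbuf validator call site
that stages a DX shader after decoding a define-shader command; sw_context,
cmd and ctx_res are assumed names, not part of this patch:

    ret = vmw_dx_shader_add(sw_context->man, ctx_res,
                            cmd->body.shaderId, cmd->body.type,
                            &sw_context->staged_cmd_res);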
+
 /**
  * User-space shader management:
  */
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
                                 size_t shader_size,
                                 size_t offset,
                                 SVGA3dShaderType shader_type,
+                                uint8_t num_input_sig,
+                                uint8_t num_output_sig,
                                 struct ttm_object_file *tfile,
                                 u32 *handle)
 {
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
         */
 
        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-                                offset, shader_type, buffer,
+                                offset, shader_type, num_input_sig,
+                                num_output_sig, buffer,
                                 vmw_user_shader_free);
        if (unlikely(ret != 0))
                goto out;
@@ -449,7 +833,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-                                offset, shader_type, buffer,
+                                offset, shader_type, 0, 0, buffer,
                                 vmw_shader_free);
 
 out_err:
@@ -457,19 +841,20 @@ out_err:
 }
 
 
-int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv)
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+                            enum drm_vmw_shader_type shader_type_drm,
+                            u32 buffer_handle, size_t size, size_t offset,
+                            uint8_t num_input_sig, uint8_t num_output_sig,
+                            uint32_t *shader_handle)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
-       struct drm_vmw_shader_create_arg *arg =
-               (struct drm_vmw_shader_create_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *buffer = NULL;
        SVGA3dShaderType shader_type;
        int ret;
 
-       if (arg->buffer_handle != SVGA3D_INVALID_ID) {
-               ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+       if (buffer_handle != SVGA3D_INVALID_ID) {
+               ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
                                             &buffer);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                }
 
                if ((u64)buffer->base.num_pages * PAGE_SIZE <
-                   (u64)arg->size + (u64)arg->offset) {
+                   (u64)size + (u64)offset) {
                        DRM_ERROR("Illegal buffer- or shader size.\n");
                        ret = -EINVAL;
                        goto out_bad_arg;
                }
        }
 
-       switch (arg->shader_type) {
+       switch (shader_type_drm) {
        case drm_vmw_shader_type_vs:
                shader_type = SVGA3D_SHADERTYPE_VS;
                break;
        case drm_vmw_shader_type_ps:
                shader_type = SVGA3D_SHADERTYPE_PS;
                break;
-       case drm_vmw_shader_type_gs:
-               shader_type = SVGA3D_SHADERTYPE_GS;
-               break;
        default:
                DRM_ERROR("Illegal shader type.\n");
                ret = -EINVAL;
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
        if (unlikely(ret != 0))
                goto out_bad_arg;
 
-       ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
-                                   shader_type, tfile, &arg->shader_handle);
+       ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+                                   shader_type, num_input_sig,
+                                   num_output_sig, tfile, shader_handle);
 
        ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
@@ -515,7 +898,7 @@ out_bad_arg:
 }
 
 /**
- * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * vmw_shader_id_ok - Check whether a compat shader user key and
  * shader type are within valid bounds.
  *
  * @user_key: User space id of the shader.
@@ -523,13 +906,13 @@ out_bad_arg:
  *
  * Returns true if valid, false if not.
  */
-static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
 {
        return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
 }
 
 /**
- * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
+ * vmw_shader_key - Compute a hash key suitable for a compat shader.
  *
  * @user_key: User space id of the shader.
  * @shader_type: Shader type.
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
  * Returns a hash key suitable for a command buffer managed resource
  * manager hash table.
  */
-static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
 {
        return user_key | (shader_type << 20);
 }
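Since vmw_shader_id_ok() limits user_key to 20 bits and shader_type to
values below 16, the packing is lossless. A worked example (editor
illustration only):

    u32 key  = vmw_shader_key(0x12345, 3); /* 0x12345 | (3 << 20) = 0x00312345 */
    u32 id   = key & ((1 << 20) - 1);      /* 0x12345 */
    u32 type = key >> 20;                  /* 3 */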
 
 /**
- * vmw_compat_shader_remove - Stage a compat shader for removal.
+ * vmw_shader_remove - Stage a compat shader for removal.
  *
  * @man: Pointer to the compat shader manager identifying the shader namespace.
  * @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
  * @shader_type: Shader type.
  * @list: Caller's list of staged command buffer resource actions.
  */
-int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-                            u32 user_key, SVGA3dShaderType shader_type,
-                            struct list_head *list)
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+                     u32 user_key, SVGA3dShaderType shader_type,
+                     struct list_head *list)
 {
-       if (!vmw_compat_shader_id_ok(user_key, shader_type))
+       struct vmw_resource *dummy;
+
+       if (!vmw_shader_id_ok(user_key, shader_type))
                return -EINVAL;
 
-       return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
-                                    vmw_compat_shader_key(user_key,
-                                                          shader_type),
-                                    list);
+       return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
+                                    vmw_shader_key(user_key, shader_type),
+                                    list, &dummy);
 }
 
 /**
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
        int ret;
        struct vmw_resource *res;
 
-       if (!vmw_compat_shader_id_ok(user_key, shader_type))
+       if (!vmw_shader_id_ok(user_key, shader_type))
                return -EINVAL;
 
        /* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto no_reserve;
 
-       ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
-                                vmw_compat_shader_key(user_key, shader_type),
+       ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+                                vmw_shader_key(user_key, shader_type),
                                 res, list);
        vmw_resource_unreference(&res);
 no_reserve:
@@ -639,7 +1023,7 @@ out:
 }
 
 /**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_shader_lookup - Look up a compat shader
  *
  * @man: Pointer to the command buffer managed resource manager identifying
  * the shader namespace.
@@ -650,14 +1034,26 @@ out:
  * found. An error pointer otherwise.
  */
 struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-                        u32 user_key,
-                        SVGA3dShaderType shader_type)
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+                 u32 user_key,
+                 SVGA3dShaderType shader_type)
 {
-       if (!vmw_compat_shader_id_ok(user_key, shader_type))
+       if (!vmw_shader_id_ok(user_key, shader_type))
                return ERR_PTR(-EINVAL);
 
-       return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
-                                    vmw_compat_shader_key(user_key,
-                                                          shader_type));
+       return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+                                    vmw_shader_key(user_key, shader_type));
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+       struct drm_vmw_shader_create_arg *arg =
+               (struct drm_vmw_shader_create_arg *)data;
+
+       return vmw_shader_define(dev, file_priv, arg->shader_type,
+                                arg->buffer_handle,
+                                arg->size, arg->offset,
+                                0, 0,
+                                &arg->shader_handle);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644 (file)
index 0000000..4dfdc95
--- /dev/null
@@ -0,0 +1,555 @@
+/**************************************************************************
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+/*
+ * Currently the only reason we need to keep track of views is that if we
+ * destroy a hardware surface, all views pointing to it must also be destroyed,
+ * otherwise the device will error.
+ * So in particular, if a surface is evicted, we must destroy all views
+ * pointing to it, and all context bindings of those views. Similarly we must
+ * restore the view bindings, views and surfaces pointed to by the views when a
+ * context is referenced in the command stream.
+ */
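As an illustration of that rule (editor sketch only; the surrounding
eviction code is assumed), the surface eviction path is expected to walk
the surface's view list under the binding mutex before the surface itself
goes away:

    mutex_lock(&dev_priv->binding_mutex);
    vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
    mutex_unlock(&dev_priv->binding_mutex);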
+
+/**
+ * struct vmw_view - view metadata
+ *
+ * @res: The struct vmw_resource we derive from
+ * @ctx: Non-refcounted pointer to the context this view belongs to.
+ * @srf: Refcounted pointer to the surface pointed to by this view.
+ * @cotable: Refcounted pointer to the cotable holding this view.
+ * @srf_head: List head for the surface-to-view list.
+ * @cotable_head: List head for the cotable-to-view list.
+ * @view_type: View type.
+ * @view_id: User-space per context view id. Currently used also as per
+ * context device view id.
+ * @cmd_size: Size of the SVGA3D define view command that we've copied from the
+ * command stream.
+ * @committed: Whether the view is actually created or pending creation at the
+ * device level.
+ * @cmd: The SVGA3D define view command copied from the command stream.
+ */
+struct vmw_view {
+       struct rcu_head rcu;
+       struct vmw_resource res;
+       struct vmw_resource *ctx;      /* Immutable */
+       struct vmw_resource *srf;      /* Immutable */
+       struct vmw_resource *cotable;  /* Immutable */
+       struct list_head srf_head;     /* Protected by binding_mutex */
+       struct list_head cotable_head; /* Protected by binding_mutex */
+       unsigned view_type;            /* Immutable */
+       unsigned view_id;              /* Immutable */
+       u32 cmd_size;                  /* Immutable */
+       bool committed;                /* Protected by binding_mutex */
+       u32 cmd[1];                    /* Immutable */
+};
+
+static int vmw_view_create(struct vmw_resource *res);
+static int vmw_view_destroy(struct vmw_resource *res);
+static void vmw_hw_view_destroy(struct vmw_resource *res);
+static void vmw_view_commit_notify(struct vmw_resource *res,
+                                  enum vmw_cmdbuf_res_state state);
+
+static const struct vmw_res_func vmw_view_func = {
+       .res_type = vmw_res_view,
+       .needs_backup = false,
+       .may_evict = false,
+       .type_name = "DX view",
+       .backup_placement = NULL,
+       .create = vmw_view_create,
+       .commit_notify = vmw_view_commit_notify,
+};
+
+/**
+ * struct vmw_view_define - view define command body stub
+ *
+ * @view_id: The device id of the view being defined
+ * @sid: The surface id of the view being defined
+ *
+ * This generic struct is used by the code to change @view_id and @sid of a
+ * saved view define command.
+ */
+struct vmw_view_define {
+       uint32 view_id;
+       uint32 sid;
+};
+
+/**
+ * vmw_view - Convert a struct vmw_resource to a struct vmw_view
+ *
+ * @res: Pointer to the resource to convert.
+ *
+ * Returns a pointer to a struct vmw_view.
+ */
+static struct vmw_view *vmw_view(struct vmw_resource *res)
+{
+       return container_of(res, struct vmw_view, res);
+}
+
+/**
+ * vmw_view_commit_notify - Notify that a view operation has been committed to
+ * hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the view resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_view_commit_notify(struct vmw_resource *res,
+                                  enum vmw_cmdbuf_res_state state)
+{
+       struct vmw_view *view = vmw_view(res);
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       mutex_lock(&dev_priv->binding_mutex);
+       if (state == VMW_CMDBUF_RES_ADD) {
+               struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+
+               list_add_tail(&view->srf_head, &srf->view_list);
+               vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+               view->committed = true;
+               res->id = view->view_id;
+
+       } else {
+               list_del_init(&view->cotable_head);
+               list_del_init(&view->srf_head);
+               view->committed = false;
+               res->id = -1;
+       }
+       mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_create - Create a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Create a hardware view. Typically used if that view has previously been
+ * destroyed by an eviction operation.
+ */
+static int vmw_view_create(struct vmw_resource *res)
+{
+       struct vmw_view *view = vmw_view(res);
+       struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               struct vmw_view_define body;
+       } *cmd;
+
+       mutex_lock(&dev_priv->binding_mutex);
+       if (!view->committed) {
+               mutex_unlock(&dev_priv->binding_mutex);
+               return 0;
+       }
+
+       cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
+                                 view->ctx->id);
+       if (!cmd) {
+               DRM_ERROR("Failed reserving FIFO space for view creation.\n");
+               mutex_unlock(&dev_priv->binding_mutex);
+               return -ENOMEM;
+       }
+       memcpy(cmd, &view->cmd, view->cmd_size);
+       WARN_ON(cmd->body.view_id != view->view_id);
+       /* Sid may have changed due to surface eviction. */
+       WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
+       cmd->body.sid = view->srf->id;
+       vmw_fifo_commit(res->dev_priv, view->cmd_size);
+       res->id = view->view_id;
+       list_add_tail(&view->srf_head, &srf->view_list);
+       vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+       mutex_unlock(&dev_priv->binding_mutex);
+
+       return 0;
+}
+
+/**
+ * vmw_view_destroy - Destroy a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view. Typically used on unexpected termination of the
+ * owning process or if the surface the view is pointing to is destroyed.
+ */
+static int vmw_view_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_view *view = vmw_view(res);
+       struct {
+               SVGA3dCmdHeader header;
+               union vmw_view_destroy body;
+       } *cmd;
+
+       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+       vmw_binding_res_list_scrub(&res->binding_head);
+
+       if (!view->committed || res->id == -1)
+               return 0;
+
+       cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
+       if (!cmd) {
+               DRM_ERROR("Failed reserving FIFO space for view "
+                         "destruction.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = vmw_view_destroy_cmds[view->view_type];
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.view_id = view->view_id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       res->id = -1;
+       list_del_init(&view->cotable_head);
+       list_del_init(&view->srf_head);
+
+       return 0;
+}
+
+/**
+ * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view if it's still present.
+ */
+static void vmw_hw_view_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       mutex_lock(&dev_priv->binding_mutex);
+       WARN_ON(vmw_view_destroy(res));
+       res->id = -1;
+       mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Returns a key suitable for the command buffer managed resource manager
+ * hash table.
+ */
+static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
+{
+       return user_key | (view_type << 20);
+}
+
+/**
+ * vmw_view_id_ok - Basic view id and type range checks.
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Checks that the view id and type (typically provided by user-space) is
+ * valid.
+ */
+static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
+{
+       return (user_key < SVGA_COTABLE_MAX_IDS &&
+               view_type < vmw_view_max);
+}
+
+/**
+ * vmw_view_res_free - resource res_free callback for view resources
+ *
+ * @res: Pointer to a struct vmw_resource
+ *
+ * Frees memory and memory accounting held by a struct vmw_view.
+ */
+static void vmw_view_res_free(struct vmw_resource *res)
+{
+       struct vmw_view *view = vmw_view(res);
+       size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       vmw_resource_unreference(&view->cotable);
+       vmw_resource_unreference(&view->srf);
+       kfree_rcu(view, rcu);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_view_add - Create a view resource and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @man: Pointer to the command buffer resource manager identifying the
+ * view namespace.
+ * @ctx: Pointer to a struct vmw_resource identifying the active context.
+ * @srf: Pointer to a struct vmw_resource identifying the surface the view
+ * points to.
+ * @view_type: The view type deduced from the view create command.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type and to the context.
+ * @cmd: Pointer to the view create command in the command stream.
+ * @cmd_size: Size of the view create command in the command stream.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+                struct vmw_resource *ctx,
+                struct vmw_resource *srf,
+                enum vmw_view_type view_type,
+                u32 user_key,
+                const void *cmd,
+                size_t cmd_size,
+                struct list_head *list)
+{
+       static const size_t vmw_view_define_sizes[] = {
+               [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
+               [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
+               [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+       };
+
+       struct vmw_private *dev_priv = ctx->dev_priv;
+       struct vmw_resource *res;
+       struct vmw_view *view;
+       size_t size;
+       int ret;
+
+       if (cmd_size != vmw_view_define_sizes[view_type] +
+           sizeof(SVGA3dCmdHeader)) {
+               DRM_ERROR("Illegal view create command size.\n");
+               return -EINVAL;
+       }
+
+       if (!vmw_view_id_ok(user_key, view_type)) {
+               DRM_ERROR("Illegal view add view id.\n");
+               return -EINVAL;
+       }
+
+       size = offsetof(struct vmw_view, cmd) + cmd_size;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+       if (ret) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for view"
+                                 " creation.\n");
+               return ret;
+       }
+
+       view = kmalloc(size, GFP_KERNEL);
+       if (!view) {
+               ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+               return -ENOMEM;
+       }
+
+       res = &view->res;
+       view->ctx = ctx;
+       view->srf = vmw_resource_reference(srf);
+       view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+       view->view_type = view_type;
+       view->view_id = user_key;
+       view->cmd_size = cmd_size;
+       view->committed = false;
+       INIT_LIST_HEAD(&view->srf_head);
+       INIT_LIST_HEAD(&view->cotable_head);
+       memcpy(&view->cmd, cmd, cmd_size);
+       ret = vmw_resource_init(dev_priv, res, true,
+                               vmw_view_res_free, &vmw_view_func);
+       if (ret)
+               goto out_resource_init;
+
+       ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
+                                vmw_view_key(user_key, view_type),
+                                res, list);
+       if (ret)
+               goto out_resource_init;
+
+       res->id = view->view_id;
+       vmw_resource_activate(res, vmw_hw_view_destroy);
+
+out_resource_init:
+       vmw_resource_unreference(&res);
+
+       return ret;
+}
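Note (editor illustration only): a hypothetical validator call site staging
a view when a define-view command is decoded; header, view_id and
sw_context are assumed names, not part of this patch:

    ret = vmw_view_add(sw_context->man, ctx_res, srf_res,
                       vmw_view_cmd_to_type(header->id),
                       view_id, header,
                       header->size + sizeof(*header),
                       &sw_context->staged_cmd_res);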
+
+/**
+ * vmw_view_remove - Stage a view for removal.
+ *
+ * @man: Pointer to the view manager identifying the view namespace.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type.
+ * @view_type: View type
+ * @list: Caller's list of staged command buffer resource actions.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ */
+int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+                   u32 user_key, enum vmw_view_type view_type,
+                   struct list_head *list,
+                   struct vmw_resource **res_p)
+{
+       if (!vmw_view_id_ok(user_key, view_type)) {
+               DRM_ERROR("Illegal view remove view id.\n");
+               return -EINVAL;
+       }
+
+       return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
+                                    vmw_view_key(user_key, view_type),
+                                    list, res_p);
+}
+
+/**
+ * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views belonging to a cotable.
+ * @readback: Unused. Needed for function interface only.
+ *
+ * This function evicts all views belonging to a cotable.
+ * It must be called with the binding_mutex held, and the caller must hold
+ * a reference to the view resource. This is typically called before the
+ * cotable is paged out.
+ */
+void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+                                  struct list_head *list,
+                                  bool readback)
+{
+       struct vmw_view *entry, *next;
+
+       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+       list_for_each_entry_safe(entry, next, list, cotable_head)
+               WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_surface_list_destroy - Evict all views pointing to a surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views pointing to a surface.
+ *
+ * This function evicts all views pointing to a surface. This is typically
+ * called before the surface is evicted.
+ */
+void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+                                  struct list_head *list)
+{
+       struct vmw_view *entry, *next;
+
+       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+       list_for_each_entry_safe(entry, next, list, srf_head)
+               WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
+ * pointing to.
+ *
+ * @res: pointer to a view resource.
+ *
+ * Note that the view itself holds a reference, so as long as the view
+ * resource is alive, the surface resource will be.
+ */
+struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
+{
+       return vmw_view(res)->srf;
+}
+
+/**
+ * vmw_view_lookup - Look up a view.
+ *
+ * @man: The context's cmdbuf ref manager.
+ * @view_type: The view type.
+ * @user_key: The view user id.
+ *
+ * Returns a refcounted pointer to a view or an error pointer if not found.
+ */
+struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+                                    enum vmw_view_type view_type,
+                                    u32 user_key)
+{
+       return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
+                                    vmw_view_key(user_key, view_type));
+}
+
+const u32 vmw_view_destroy_cmds[] = {
+       [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+       [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+       [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+};
+
+const SVGACOTableType vmw_view_cotables[] = {
+       [vmw_view_sr] = SVGA_COTABLE_SRVIEW,
+       [vmw_view_rt] = SVGA_COTABLE_RTVIEW,
+       [vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+};
+
+const SVGACOTableType vmw_so_cotables[] = {
+       [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
+       [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
+       [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
+       [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
+       [vmw_so_ss] = SVGA_COTABLE_SAMPLER,
+       [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
+};
+
+/* To remove unused function warning */
+static void vmw_so_build_asserts(void) __attribute__((used));
+
+/*
+ * This function is unused at run-time, and only used to dump various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_so_build_asserts(void)
+{
+       /* Assert that our vmw_view_cmd_to_type() function is correct. */
+       BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
+                    SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
+       BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
+                    SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
+       BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
+                    SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
+       BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
+                    SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
+       BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
+                    SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
+
+       /* Assert that our "one body fits all" assumption is valid */
+       BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
+
+       /* Assert that the view key space can hold all view ids. */
+       BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
+
+       /*
+        * Assert that the offset of sid in all view define commands
+        * is what we assume it to be.
+        */
+       BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+                    offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
+       BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+                    offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
+       BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+                    offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644 (file)
index 0000000..5ef867a
--- /dev/null
@@ -0,0 +1,160 @@
+/**************************************************************************
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef VMW_SO_H
+#define VMW_SO_H
+
+enum vmw_view_type {
+       vmw_view_sr,
+       vmw_view_rt,
+       vmw_view_ds,
+       vmw_view_max,
+};
+
+enum vmw_so_type {
+       vmw_so_el,
+       vmw_so_bs,
+       vmw_so_ds,
+       vmw_so_rs,
+       vmw_so_ss,
+       vmw_so_so,
+       vmw_so_max,
+};
+
+/**
+ * union vmw_view_destroy - view destruction command body
+ *
+ * @rtv: RenderTarget view destruction command body
+ * @srv: ShaderResource view destruction command body
+ * @dsv: DepthStencil view destruction command body
+ * @view_id: A single u32 view id.
+ *
+ * The assumption here is that all union members are really represented by a
+ * single u32 in the command stream. If that's not the case, the size of
+ * this union will not equal the size of a u32, the assumption is invalid,
+ * and we detect that at compile time in the
+ * vmw_so_build_asserts() function.
+ */
+union vmw_view_destroy {
+       struct SVGA3dCmdDXDestroyRenderTargetView rtv;
+       struct SVGA3dCmdDXDestroyShaderResourceView srv;
+       struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+       u32 view_id;
+};
+
+/* Map enum vmw_view_type to view destroy command ids */
+extern const u32 vmw_view_destroy_cmds[];
+
+/* Map enum vmw_view_type to SVGACOTableType */
+extern const SVGACOTableType vmw_view_cotables[];
+
+/* Map enum vmw_so_type to SVGACOTableType */
+extern const SVGACOTableType vmw_so_cotables[];
+
+/*
+ * vmw_view_cmd_to_type - Return the view type for a create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given view create or destroy command id, return the corresponding
+ * enum vmw_view_type. If the command is unknown, return vmw_view_max.
+ * The validity of the simplified calculation is verified in the
+ * vmw_so_build_asserts() function.
+ */
+static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
+{
+       u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+
+       if (tmp > (u32)vmw_view_max)
+               return vmw_view_max;
+
+       return (enum vmw_view_type) tmp;
+}
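Because define/destroy ids come in adjacent pairs (verified by
vmw_so_build_asserts() in vmwgfx_so.c), both members of a pair divide down
to the same type. For example (editor illustration only):

    vmw_view_cmd_to_type(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW);  /* vmw_view_rt */
    vmw_view_cmd_to_type(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW); /* vmw_view_rt */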
+
+/*
+ * vmw_so_cmd_to_type - Return the state object type for a
+ * create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given state object create or destroy command id,
+ * return the corresponding enum vmw_so_type. If the command is unknown,
+ * return vmw_so_max. We should perhaps optimize this function using a
+ * strategy similar to that of vmw_view_cmd_to_type(); a sketch follows below.
+ */
+static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
+{
+       switch (id) {
+       case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
+       case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
+               return vmw_so_el;
+       case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
+       case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
+               return vmw_so_bs;
+       case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
+       case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
+               return vmw_so_ds;
+       case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+       case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
+               return vmw_so_rs;
+       case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
+       case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
+               return vmw_so_ss;
+       case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+       case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
+               return vmw_so_so;
+       default:
+               break;
+       }
+       return vmw_so_max;
+}
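The optimization the comment above alludes to might look like the sketch below. It is only correct if the six define/destroy pairs are contiguous in the command id space and ordered like enum vmw_so_type, which would have to be pinned down with build asserts first; treat it as a sketch, not a drop-in replacement:

static inline enum vmw_so_type vmw_so_cmd_to_type_sketch(u32 id)
{
	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT) / 2;

	if (tmp >= (u32)vmw_so_max)
		return vmw_so_max;

	return (enum vmw_so_type) tmp;
}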
+
+/*
+ * View management - vmwgfx_so.c
+ */
+extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+                       struct vmw_resource *ctx,
+                       struct vmw_resource *srf,
+                       enum vmw_view_type view_type,
+                       u32 user_key,
+                       const void *cmd,
+                       size_t cmd_size,
+                       struct list_head *list);
+
+extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+                          u32 user_key, enum vmw_view_type view_type,
+                          struct list_head *list,
+                          struct vmw_resource **res_p);
+
+extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+                                         struct list_head *view_list);
+extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+                                         struct list_head *list,
+                                         bool readback);
+extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
+extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+                                           enum vmw_view_type view_type,
+                                           u32 user_key);
+#endif
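To show how the entry points above are meant to compose, here is a hypothetical routing helper of the kind the execbuf validation code would use; the resource manager, context, surface and validation list are assumed to come from the caller's validation state and are not defined by this header:

static int example_define_view(struct vmw_cmdbuf_res_manager *man,
			       struct vmw_resource *ctx,
			       struct vmw_resource *srf,
			       u32 cmd_id, u32 user_key,
			       const void *cmd, size_t cmd_size,
			       struct list_head *list)
{
	enum vmw_view_type vtype = vmw_view_cmd_to_type(cmd_id);

	if (vtype == vmw_view_max)
		return -EINVAL;

	/* Registers the view and stages the create command for the host. */
	return vmw_view_add(man, ctx, srf, vtype, user_key,
			    cmd, cmd_size, list);
}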
index d4a453703eed36a18ef88e846e68d96b9e16c3b5..ae6773e171b00cecf7753f6b2623c6868185a164 100644 (file)
@@ -561,6 +561,7 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
                                true, /* a scanout buffer */
                                content_srf.mip_levels[0],
                                content_srf.multisample_count,
+                               0,
                                display_base_size,
                                &display_srf);
                if (unlikely(ret != 0)) {
index eea1790eed6a037a880f5456545d23b9d258a10d..12ade0cf98d0d5ed55d1f972c59fa7ed59aa4d80 100644 (file)
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 #include <ttm/ttm_placement.h>
 #include "device_include/svga3d_surfacedefs.h"
 
+
 /**
  * struct vmw_user_surface - User-space visible surface resource
  *
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
         * surface validate.
         */
 
+       INIT_LIST_HEAD(&srf->view_list);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return ret;
 }
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        desc = svga3dsurface_get_desc(req->format);
        if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
                DRM_ERROR("Invalid surface format for surface creation.\n");
+               DRM_ERROR("Format requested is: %d\n", req->format);
                return -EINVAL;
        }
 
@@ -1018,12 +1023,16 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 {
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf = vmw_res_to_srf(res);
-       uint32_t cmd_len, submit_len;
+       uint32_t cmd_len, cmd_id, submit_len;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface body;
        } *cmd;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineGBSurface_v2 body;
+       } *cmd2;
 
        if (likely(res->id != -1))
                return 0;
@@ -1040,9 +1049,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
                goto out_no_fifo;
        }
 
-       cmd_len = sizeof(cmd->body);
-       submit_len = sizeof(*cmd);
+       if (srf->array_size > 0) {
+               /* has_dx was checked at surface creation time. */
+               cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+               cmd_len = sizeof(cmd2->body);
+               submit_len = sizeof(*cmd2);
+       } else {
+               cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+               cmd_len = sizeof(cmd->body);
+               submit_len = sizeof(*cmd);
+       }
+
        cmd = vmw_fifo_reserve(dev_priv, submit_len);
+       cmd2 = (typeof(cmd2))cmd;
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
@@ -1050,17 +1069,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
                goto out_no_fifo;
        }
 
-       cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
-       cmd->header.size = cmd_len;
-       cmd->body.sid = srf->res.id;
-       cmd->body.surfaceFlags = srf->flags;
-       cmd->body.format = srf->format;
-       cmd->body.numMipLevels = srf->mip_levels[0];
-       cmd->body.multisampleCount = srf->multisample_count;
-       cmd->body.autogenFilter = srf->autogen_filter;
-       cmd->body.size.width = srf->base_size.width;
-       cmd->body.size.height = srf->base_size.height;
-       cmd->body.size.depth = srf->base_size.depth;
+       if (srf->array_size > 0) {
+               cmd2->header.id = cmd_id;
+               cmd2->header.size = cmd_len;
+               cmd2->body.sid = srf->res.id;
+               cmd2->body.surfaceFlags = srf->flags;
+               cmd2->body.format = cpu_to_le32(srf->format);
+               cmd2->body.numMipLevels = srf->mip_levels[0];
+               cmd2->body.multisampleCount = srf->multisample_count;
+               cmd2->body.autogenFilter = srf->autogen_filter;
+               cmd2->body.size.width = srf->base_size.width;
+               cmd2->body.size.height = srf->base_size.height;
+               cmd2->body.size.depth = srf->base_size.depth;
+               cmd2->body.arraySize = srf->array_size;
+       } else {
+               cmd->header.id = cmd_id;
+               cmd->header.size = cmd_len;
+               cmd->body.sid = srf->res.id;
+               cmd->body.surfaceFlags = srf->flags;
+               cmd->body.format = cpu_to_le32(srf->format);
+               cmd->body.numMipLevels = srf->mip_levels[0];
+               cmd->body.multisampleCount = srf->multisample_count;
+               cmd->body.autogenFilter = srf->autogen_filter;
+               cmd->body.size.width = srf->base_size.width;
+               cmd->body.size.height = srf->base_size.height;
+               cmd->body.size.depth = srf->base_size.depth;
+       }
+
        vmw_fifo_commit(dev_priv, submit_len);
 
        return 0;
@@ -1188,6 +1223,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 static int vmw_gb_surface_destroy(struct vmw_resource *res)
 {
        struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_surface *srf = vmw_res_to_srf(res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1233,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
                return 0;
 
        mutex_lock(&dev_priv->binding_mutex);
-       vmw_context_binding_res_list_scrub(&res->binding_head);
+       vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+       vmw_binding_res_list_scrub(&res->binding_head);
 
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
@@ -1259,6 +1296,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                        req->drm_surface_flags & drm_vmw_surface_flag_scanout,
                        req->mip_levels,
                        req->multisample_count,
+                       req->array_size,
                        req->base_size,
                        &srf);
        if (unlikely(ret != 0))
@@ -1275,10 +1313,17 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
        res = &user_srf->srf.res;
 
 
-       if (req->buffer_handle != SVGA3D_INVALID_ID)
+       if (req->buffer_handle != SVGA3D_INVALID_ID) {
                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                             &res->backup);
-       else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
+               if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+                   res->backup_size) {
+                       DRM_ERROR("Surface backup buffer is too small.\n");
+                       vmw_dmabuf_unreference(&res->backup);
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+       } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
                                            res->backup_size,
                                            req->drm_surface_flags &
@@ -1378,6 +1423,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
        rep->creq.drm_surface_flags = 0;
        rep->creq.multisample_count = srf->multisample_count;
        rep->creq.autogen_filter = srf->autogen_filter;
+       rep->creq.array_size = srf->array_size;
        rep->creq.buffer_handle = backup_handle;
        rep->creq.base_size = srf->base_size;
        rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1404,6 +1450,7 @@ out_bad_resource:
  * @for_scanout: true if intended to be used as a scanout buffer
  * @num_mip_levels:  number of MIP levels
  * @multisample_count: Surface multisample count.
+ * @array_size: Surface array size.
  * @size: width, height and depth of the surface requested
  * @srf_out: Allocated vmw_surface; set to NULL on failure.
  *
@@ -1419,6 +1466,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
                               bool for_scanout,
                               uint32_t num_mip_levels,
                               uint32_t multisample_count,
+                              uint32_t array_size,
                               struct drm_vmw_size size,
                               struct vmw_surface **srf_out)
 {
@@ -1426,7 +1474,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        int ret;
-
+       u32 num_layers;
 
        *srf_out = NULL;
 
@@ -1445,6 +1493,12 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
                }
        }
 
+       /* array_size must be zero for non-DX hosts. */
+       if (array_size > 0 && !dev_priv->has_dx) {
+               DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+               return -EINVAL;
+       }
+
        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;
@@ -1481,10 +1535,21 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
        srf->multisample_count = multisample_count;
 
-       srf->res.backup_size   = svga3dsurface_get_serialized_size(srf->format,
-                                       srf->base_size,
-                                       srf->mip_levels[0],
-                                       srf->flags & SVGA3D_SURFACE_CUBEMAP);
+       if (array_size)
+               num_layers = array_size;
+       else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+               num_layers = SVGA3D_MAX_SURFACE_FACES;
+       else
+               num_layers = 1;
+
+       srf->res.backup_size   =
+               svga3dsurface_get_serialized_size(srf->format,
+                                                 srf->base_size,
+                                                 srf->mip_levels[0],
+                                                 num_layers);
+
+       if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+               srf->res.backup_size += sizeof(SVGA3dDXSOState);
 
        if (dev_priv->active_display_unit == vmw_du_screen_target &&
            for_scanout)
index c8a86318017446918d54380900b28c9d60df8eb0..c5bcddd9f58cf4ebec1c0adf5c3e87b7e07f2f9f 100644 (file)
@@ -64,6 +64,7 @@
 #define DRM_VMW_GB_SURFACE_CREATE    23
 #define DRM_VMW_GB_SURFACE_REF       24
 #define DRM_VMW_SYNCCPU              25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
 /*************************************************************************/
 /**
@@ -89,6 +90,7 @@
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
 #define DRM_VMW_PARAM_SCREEN_TARGET    11
+#define DRM_VMW_PARAM_DX               12
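DRM_VMW_PARAM_DX lets user space probe for DX support before attempting to use any of the new functionality. A minimal libdrm-based sketch, assuming the drm_vmw_getparam_arg struct and the DRM_VMW_GET_PARAM command index defined earlier in this file:

#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Returns non-zero if the host supports DX contexts. */
static int vmw_host_has_dx(int fd)
{
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_DX };

	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) != 0)
		return 0;

	return arg.value != 0;
}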
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -297,7 +299,7 @@ union drm_vmw_surface_reference_arg {
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
        uint64_t commands;
@@ -306,6 +308,8 @@ struct drm_vmw_execbuf_arg {
        uint64_t fence_rep;
        uint32_t version;
        uint32_t flags;
+       uint32_t context_handle;
+       uint32_t pad64;
 };
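With version 2, user space can name the context to execute against directly in the argument struct instead of relying on context ids embedded in the command stream. A hypothetical submission helper, assuming libdrm and a context handle obtained from the DRM_VMW_CREATE_EXTENDED_CONTEXT ioctl added below:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int submit_dx_commands(int fd, void *cmds, uint32_t size,
			      uint32_t dx_context_handle)
{
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_execbuf_arg arg = {
		.commands = (uintptr_t)cmds,
		.command_size = size,
		.fence_rep = (uintptr_t)&fence_rep,
		.version = DRM_VMW_EXECBUF_VERSION,	/* 2 */
		.context_handle = dx_context_handle,
	};

	/* DRM_VMW_EXECBUF is the command index defined earlier in this file. */
	return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
}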
 
 /**
@@ -826,7 +830,6 @@ struct drm_vmw_update_layout_arg {
 enum drm_vmw_shader_type {
        drm_vmw_shader_type_vs = 0,
        drm_vmw_shader_type_ps,
-       drm_vmw_shader_type_gs
 };
 
 
@@ -908,6 +911,8 @@ enum drm_vmw_surface_flags {
  * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
  *                    if none.
  * @base_size         Size of the base mip level for all faces.
+ * @array_size        Must be zero for non-DX hardware, and if non-zero
+ *                    svga3d_flags must have the proper bind flags set up.
  *
  * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -920,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
        uint32_t multisample_count;
        uint32_t autogen_filter;
        uint32_t buffer_handle;
-       uint32_t pad64;
+       uint32_t array_size;
        struct drm_vmw_size base_size;
 };
 
@@ -1060,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
        uint32_t pad64;
 };
 
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device-unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+       drm_vmw_context_legacy,
+       drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+       enum drm_vmw_extended_context req;
+       struct drm_vmw_context_arg rep;
+};
 #endif
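A matching user-space sketch for driving this ioctl through libdrm; the cid field follows struct drm_vmw_context_arg defined earlier in this file:

#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int create_dx_context(int fd, int32_t *cid_out)
{
	union drm_vmw_extended_context_arg arg = {
		.req = drm_vmw_context_dx,
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
				  &arg, sizeof(arg));
	if (ret == 0)
		*cid_out = arg.rep.cid;

	return ret;
}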