For certain surface copies, we don't have a user space handle for
the destination surface. In such cases, we are going to trust that
our caller is giving us the right surface ID.
To handle this case, we introduce a quirk flag that may also be useful
for handling other cases in the future.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
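---
For reference, a kernel-internal caller would use the new quirk roughly
as sketched below. This is illustrative only: vmw_stdu_copy_example()
and its arguments are not part of this patch; only vmw_execbuf_process()
and VMW_QUIRK_SCREENTARGET come from the change itself.

	/*
	 * Sketch: submit a kernel-built command buffer while telling
	 * the command verifier to trust the destination surface ID.
	 */
	static int vmw_stdu_copy_example(struct drm_file *file_priv,
					 struct vmw_private *dev_priv,
					 void *cmd, uint32_t cmd_size)
	{
		return vmw_execbuf_process(file_priv, dev_priv,
					   NULL,	/* no user-space commands */
					   cmd,		/* kernel-built commands */
					   cmd_size,
					   0,		/* no throttling */
					   VMW_QUIRK_SCREENTARGET,
					   NULL,	/* no user fence rep */
					   NULL);	/* no out fence needed */
	}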
+#define VMW_QUIRK_SCREENTARGET (1U << 0)
+
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
struct list_head staged_cmd_res;
+	uint32_t quirks;
};
struct vmw_legacy_display;
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
+	uint32_t quirks,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
+
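+	/* Kernel-trusted caller: skip the destination sid lookup. */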
+ if (sw_context->quirks & VMW_QUIRK_SCREENTARGET)
+ return 0;
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
if (unlikely(suffix->maximumOffset > bo_size))
suffix->maximumOffset = bo_size;
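+	/* Kernel-trusted caller: skip the host surface lookup. */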
+ if (sw_context->quirks & VMW_QUIRK_SCREENTARGET)
+ goto out_no_surface;
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
+	uint32_t quirks,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence)
{
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
+ sw_context->quirks = quirks;
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
+	0,
(void __user *)(unsigned long)arg->fence_rep,
NULL);
ttm_read_unlock(&dev_priv->reservation_sem);
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
+ fifo_size, 0, 0, NULL, NULL);
if (unlikely(ret != 0))
break;
fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
- 0, user_fence_rep, NULL);
+ 0, 0, user_fence_rep, NULL);
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, out_fence);
+ fifo_size, 0, 0, NULL, out_fence);
if (unlikely(ret != 0))
break;
cmd->body.ptr.offset = 0;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
+ fifo_size, 0, 0, NULL, NULL);
fifo_size = sizeof(*blits) * hit_num;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
- fifo_size, 0, NULL, out_fence);
+ fifo_size, 0, 0, NULL, out_fence);
if (unlikely(ret != 0))
break;