drm/ttm: Allow system memory as a busy placement.
[linux-2.6-block.git] drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

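/*
 * Verify that the context id referenced by a command is valid for this
 * client. The last successfully checked cid is cached in the sw_context,
 * so runs of commands against the same context only pay for one lookup.
 */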
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return 0;
}

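/*
 * Verify a surface id referenced by a command and translate it in place
 * from the user-space handle to the device surface id. The most recent
 * translation is cached in the sw_context, and SVGA3D_INVALID_ID is
 * passed through untouched.
 */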
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (unlikely((!sw_context->sid_valid ||
		      *sid != sw_context->last_sid))) {
		int real_id;
		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
					    *sid, &real_id);

		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find or use surface 0x%08x "
				  "address 0x%08lx\n",
				  (unsigned int) *sid,
				  (unsigned long) sid);
			return ret;
		}

		sw_context->last_sid = *sid;
		sw_context->sid_valid = true;
		*sid = real_id;
		sw_context->sid_translation = real_id;
	} else
		*sid = sw_context->sid_translation;

	return 0;
}


static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

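/*
 * Verify a SURFACE_DMA command: look up the GMR-backed buffer object and
 * the destination surface, record a relocation so the guest pointer can
 * be patched with the buffer's final GMR id and offset after validation,
 * and add the buffer to the list of objects to validate (only once per
 * submission). The host sid is patched with the device surface id.
 */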
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	uint32_t handle;
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	struct vmw_relocation *reloc;
	int ret;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	handle = cmd->dma.guest.ptr.gmrId;
	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of DMA commands per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = &cmd->dma.guest.ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}

	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/**
	 * Patch command stream with device SID.
	 */

	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

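/*
 * Verify a DRAW_PRIMITIVES command: check that the vertex declaration
 * and primitive range counts fit within the command size given by the
 * header, and translate the surface id of each vertex array and index
 * array.
 */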
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

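/*
 * Verify a SETTEXTURESTATE command: walk the variable-length list of
 * texture states and translate the surface id carried by each
 * SVGA3D_TS_BIND_TEXTURE entry.
 */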
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

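/*
 * Per-command verifier dispatch. The table below is indexed by the
 * SVGA3D command id relative to SVGA_3D_CMD_BASE; commands that are not
 * accepted in a user-space command stream map to vmw_cmd_invalid.
 */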
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

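/*
 * Verify a single command. Plain SVGA_CMD_UPDATE commands are passed
 * through with their fixed size; 3D commands are bounds-checked against
 * the remaining buffer size and dispatched to their verifier. On return,
 * *size holds the size of the command that was checked.
 */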
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

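/*
 * Walk the whole command buffer, verifying one command at a time until
 * the submitted size has been consumed exactly.
 */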
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

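/*
 * Patch each recorded guest pointer with the GMR id and offset of its
 * now-validated buffer object, then drop the relocation list.
 */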
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		reloc->location->offset += bo->offset;
		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

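/*
 * Make sure a buffer is somewhere the device can reach it: buffers that
 * already have a GMR binding are left alone; otherwise try to bind a GMR
 * and fall back to placing the buffer in VRAM.
 */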
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
		return 0;

	ret = vmw_gmr_bind(dev_priv, bo);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

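/*
 * Execbuf ioctl: copy the user command stream into reserved fifo space,
 * verify and patch it, reserve and validate all referenced buffer
 * objects, commit the commands, emit a fence and report it back to user
 * space through the fence_rep argument.
 */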
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		/*
		 * copy_from_user() returns the number of bytes left to
		 * copy; turn a partial copy into a proper error code.
		 */
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
				     dev_priv->val_seq++);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);
	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */

	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}