vmwgfx: Print error diagnostics if depth doesn't match the host expectation
[linux-2.6-block.git] / drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

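/*
 * This file implements the vmwgfx execbuf command verifier: user-space
 * command streams are parsed, resource handles are checked and patched
 * to device ids, and the referenced buffer objects are queued for
 * validation before the stream is committed to the device FIFO.
 */
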
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

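/*
 * Check that the context id in a command is valid for the caller.
 * The most recently validated id is cached in the software context,
 * so runs of commands against the same context avoid repeated lookups.
 */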
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return 0;
}

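/*
 * Check a surface id against the caller's handle space and patch the
 * command stream in place with the device surface id. The last
 * translation is cached, so consecutive references to the same surface
 * only pay for one lookup.
 */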
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (unlikely((!sw_context->sid_valid ||
		      *sid != sw_context->last_sid))) {
		int real_id;
		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
					    *sid, &real_id);

		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find or use surface 0x%08x "
				  "address 0x%08lx\n",
				  (unsigned int) *sid,
				  (unsigned long) sid);
			return ret;
		}

		sw_context->last_sid = *sid;
		sw_context->sid_valid = true;
		*sid = real_id;
		sw_context->sid_translation = real_id;
	} else
		*sid = sw_context->sid_translation;

	return 0;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

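/*
 * Translate a guest pointer: look up the DMA buffer backing the
 * user-space GMR handle, record a relocation so that the final GMR id
 * and offset can be patched in after validation, and add the buffer to
 * the validation list exactly once. On success a reference to the
 * buffer is returned in @vmw_bo_p and must be dropped by the caller.
 */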
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->usage = TTM_USAGE_READWRITE;
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

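/*
 * The query commands carry a guest pointer for the query result
 * structure; both checks validate the context id and translate that
 * pointer. The buffer reference returned by the translation is dropped
 * immediately, since the validation list holds its own reference.
 */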
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

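/*
 * SURFACE_DMA transfers between a guest-backed buffer and a host
 * surface: translate the guest pointer, look up the surface handle,
 * patch the stream with the device surface id, and let the KMS code
 * snoop cursor surface contents.
 */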
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("Could not find surface.\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	/*
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

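/*
 * DRAW_PRIMITIVES is variable-sized: a header followed by an array of
 * vertex declarations and an array of primitive ranges. The declared
 * counts are bounds-checked against the command size before the arrays
 * are walked, and every referenced surface id is checked and translated.
 */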
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

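/*
 * SETTEXTURESTATE carries a variable number of (stage, name, value)
 * texture states after the fixed body. Only SVGA3D_TS_BIND_TEXTURE
 * entries reference a surface, so only those have their value patched
 * through the surface id check.
 */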
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

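/*
 * Per-command dispatch: the table below is indexed by the command id
 * relative to SVGA_3D_CMD_BASE. Commands that user-space may not issue
 * map to vmw_cmd_invalid; commands that need no checking map to
 * vmw_cmd_ok.
 */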
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

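/*
 * Verify a single command. SVGA_CMD_UPDATE is a fixed-size 2D command
 * of five 32-bit words; everything else is a 3D command whose header
 * gives its size. That size is checked against the remaining stream
 * before dispatching to the per-command check function.
 */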
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

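/*
 * Walk the entire command stream, verifying one command at a time.
 * vmw_cmd_check() returns the size of each command, so the walk stays
 * in sync with the stream; ending up past the submitted size means the
 * verifier and the stream disagree.
 */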
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

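/*
 * After validation the buffer placements are final, so the recorded
 * relocations can be applied: buffers that ended up in VRAM are
 * addressed through the SVGA_GMR_FRAMEBUFFER pseudo-GMR plus an
 * offset, while buffers bound as real GMRs use their GMR id.
 */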
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

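/*
 * Execbuf ioctl: reserve FIFO space, copy the user command stream
 * directly into it, verify and patch the stream, validate all
 * referenced buffers, optionally throttle on FIFO lag, then commit
 * the stream and fence it.
 */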
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (arg->throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
				   arg->throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;
	fence_rep.pad64 = 0;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
	    (unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}