vmwgfx: Make sure we always have a user-space handle to use for objects that are...
[linux-2.6-block.git] drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_reg.h"
30#include "ttm/ttm_bo_api.h"
31#include "ttm/ttm_placement.h"
32
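/*
 * Per-command verifier stubs. vmw_cmd_invalid is used for commands that
 * user-space may not submit directly (note that the non-zero value returned
 * for CAP_SYS_ADMIN callers is still treated as an error by vmw_cmd_check);
 * vmw_cmd_ok accepts the command without further checking.
 */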
33static int vmw_cmd_invalid(struct vmw_private *dev_priv,
34 struct vmw_sw_context *sw_context,
35 SVGA3dCmdHeader *header)
36{
37 return capable(CAP_SYS_ADMIN) ? : -EINVAL;
38}
39
40static int vmw_cmd_ok(struct vmw_private *dev_priv,
41 struct vmw_sw_context *sw_context,
42 SVGA3dCmdHeader *header)
43{
44 return 0;
45}
46
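/*
 * vmw_resource_to_validate_list - Add a resource to the software context's
 * resource list if it is not already on it, transferring ownership of the
 * caller's reference; otherwise drop the caller's reference.
 */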
47static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
48 struct vmw_resource **p_res)
49{
50 struct vmw_resource *res = *p_res;
51
52 if (list_empty(&res->validate_head)) {
53 list_add_tail(&res->validate_head, &sw_context->resource_list);
54 *p_res = NULL;
55 } else
56 vmw_resource_unreference(p_res);
57}
58
59/**
60 * vmw_bo_to_validate_list - add a bo to a validate list
61 *
62 * @sw_context: The software context used for this command submission batch.
63 * @bo: The buffer object to add.
64 * @fence_flags: Fence flags to be or'ed with any other fence flags for
65 * this buffer on this submission batch.
66 * @p_val_node: If non-NULL, will be updated with the validate node number
67 * on return.
68 *
69 * Returns -EINVAL if the limit of number of buffer objects per command
70 * submission is reached.
71 */
72static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
73 struct ttm_buffer_object *bo,
74 uint32_t fence_flags,
75 uint32_t *p_val_node)
76{
77 uint32_t val_node;
78 struct ttm_validate_buffer *val_buf;
79
80 val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
81
82 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
83 DRM_ERROR("Max number of DMA buffers per submission"
84 " exceeded.\n");
85 return -EINVAL;
86 }
87
88 val_buf = &sw_context->val_bufs[val_node];
89 if (unlikely(val_node == sw_context->cur_val_buf)) {
90 val_buf->new_sync_obj_arg = NULL;
91 val_buf->bo = ttm_bo_reference(bo);
92 val_buf->usage = TTM_USAGE_READWRITE;
93 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
94 ++sw_context->cur_val_buf;
95 }
96
97 val_buf->new_sync_obj_arg = (void *)
98 ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
99 sw_context->fence_flags |= fence_flags;
100
101 if (p_val_node)
102 *p_val_node = val_node;
103
104 return 0;
105}
106
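/*
 * vmw_cmd_cid_check - Verify that the context id embedded in a command is
 * valid for this file, cache the lookup result in @sw_context and add the
 * context resource to the validate list.
 */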
107static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
108 struct vmw_sw_context *sw_context,
109 SVGA3dCmdHeader *header)
110{
111 struct vmw_resource *ctx;
112
113 struct vmw_cid_cmd {
114 SVGA3dCmdHeader header;
115 __le32 cid;
116 } *cmd;
117 int ret;
118
119 cmd = container_of(header, struct vmw_cid_cmd, header);
120 if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
121 return 0;
122
123 ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
124 &ctx);
125 if (unlikely(ret != 0)) {
126 DRM_ERROR("Could not find or use context %u\n",
127 (unsigned) cmd->cid);
128 return ret;
129 }
130
131 sw_context->last_cid = cmd->cid;
132 sw_context->cid_valid = true;
133 sw_context->cur_ctx = ctx;
134 vmw_resource_to_validate_list(sw_context, &ctx);
135
136 return 0;
137}
138
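/*
 * vmw_cmd_sid_check - Look up the user-space surface handle pointed to by
 * @sid, patch it in place with the device surface id, cache the translation
 * and add the surface resource to the validate list. SVGA3D_INVALID_ID is
 * passed through untouched.
 */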
139static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
140 struct vmw_sw_context *sw_context,
141 uint32_t *sid)
142{
143 struct vmw_surface *srf;
144 int ret;
145 struct vmw_resource *res;
146
147 if (*sid == SVGA3D_INVALID_ID)
148 return 0;
149
150 if (likely((sw_context->sid_valid &&
151 *sid == sw_context->last_sid))) {
152 *sid = sw_context->sid_translation;
153 return 0;
154 }
155
156 ret = vmw_user_surface_lookup_handle(dev_priv,
157 sw_context->tfile,
158 *sid, &srf);
159 if (unlikely(ret != 0)) {
160 DRM_ERROR("Could not find or use surface 0x%08x "
161 "address 0x%08lx\n",
162 (unsigned int) *sid,
163 (unsigned long) sid);
164 return ret;
165 }
166
167 sw_context->last_sid = *sid;
168 sw_context->sid_valid = true;
169 sw_context->sid_translation = srf->res.id;
170 *sid = sw_context->sid_translation;
171
172 res = &srf->res;
173 vmw_resource_to_validate_list(sw_context, &res);
174
175 return 0;
176}
177
178
179static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
180 struct vmw_sw_context *sw_context,
181 SVGA3dCmdHeader *header)
182{
183 struct vmw_sid_cmd {
184 SVGA3dCmdHeader header;
185 SVGA3dCmdSetRenderTarget body;
186 } *cmd;
187 int ret;
188
189 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
190 if (unlikely(ret != 0))
191 return ret;
192
193 cmd = container_of(header, struct vmw_sid_cmd, header);
194 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
195 return ret;
196}
197
198static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
199 struct vmw_sw_context *sw_context,
200 SVGA3dCmdHeader *header)
201{
202 struct vmw_sid_cmd {
203 SVGA3dCmdHeader header;
204 SVGA3dCmdSurfaceCopy body;
205 } *cmd;
206 int ret;
207
208 cmd = container_of(header, struct vmw_sid_cmd, header);
209 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
210 if (unlikely(ret != 0))
211 return ret;
212 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
213}
214
215static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
216 struct vmw_sw_context *sw_context,
217 SVGA3dCmdHeader *header)
218{
219 struct vmw_sid_cmd {
220 SVGA3dCmdHeader header;
221 SVGA3dCmdSurfaceStretchBlt body;
222 } *cmd;
223 int ret;
224
225 cmd = container_of(header, struct vmw_sid_cmd, header);
226 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
227 if (unlikely(ret != 0))
228 return ret;
229 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
230}
231
232static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
233 struct vmw_sw_context *sw_context,
234 SVGA3dCmdHeader *header)
235{
236 struct vmw_sid_cmd {
237 SVGA3dCmdHeader header;
238 SVGA3dCmdBlitSurfaceToScreen body;
239 } *cmd;
240
241 cmd = container_of(header, struct vmw_sid_cmd, header);
242
243 if (unlikely(!sw_context->kernel)) {
244 DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
245 return -EPERM;
246 }
247
248 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
249}
250
251static int vmw_cmd_present_check(struct vmw_private *dev_priv,
252 struct vmw_sw_context *sw_context,
253 SVGA3dCmdHeader *header)
254{
255 struct vmw_sid_cmd {
256 SVGA3dCmdHeader header;
257 SVGA3dCmdPresent body;
258 } *cmd;
259
260 cmd = container_of(header, struct vmw_sid_cmd, header);
261
262 if (unlikely(!sw_context->kernel)) {
263 DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
264 return -EPERM;
265 }
266
267 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
268}
269
270/**
271 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
272 *
273 * @dev_priv: The device private structure.
274 * @cid: The hardware context for the next query.
275 * @new_query_bo: The new buffer holding query results.
276 * @sw_context: The software context used for this command submission.
277 *
278 * This function checks whether @new_query_bo is suitable for holding
279 * query results, and if another buffer currently is pinned for query
280 * results. If so, the function prepares the state of @sw_context for
281 * switching pinned buffers after successful submission of the current
282 * command batch. It also checks whether we're using a new query context.
283 * In that case, it makes sure we emit a query barrier for the old
284 * context before the current query buffer is fenced.
285 */
286static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
287 uint32_t cid,
288 struct ttm_buffer_object *new_query_bo,
289 struct vmw_sw_context *sw_context)
290{
291 int ret;
292 bool add_cid = false;
293 uint32_t cid_to_add;
294
295 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
296
297 if (unlikely(new_query_bo->num_pages > 4)) {
298 DRM_ERROR("Query buffer too large.\n");
299 return -EINVAL;
300 }
301
302 if (unlikely(sw_context->cur_query_bo != NULL)) {
303 BUG_ON(!sw_context->query_cid_valid);
304 add_cid = true;
305 cid_to_add = sw_context->cur_query_cid;
306 ret = vmw_bo_to_validate_list(sw_context,
307 sw_context->cur_query_bo,
308 DRM_VMW_FENCE_FLAG_EXEC,
309 NULL);
310 if (unlikely(ret != 0))
311 return ret;
312 }
313 sw_context->cur_query_bo = new_query_bo;
314
315 ret = vmw_bo_to_validate_list(sw_context,
316 dev_priv->dummy_query_bo,
317 DRM_VMW_FENCE_FLAG_EXEC,
318 NULL);
319 if (unlikely(ret != 0))
320 return ret;
321
322 }
323
324 if (unlikely(cid != sw_context->cur_query_cid &&
325 sw_context->query_cid_valid)) {
326 add_cid = true;
327 cid_to_add = sw_context->cur_query_cid;
328 }
329
330 sw_context->cur_query_cid = cid;
331 sw_context->query_cid_valid = true;
332
333 if (add_cid) {
334 struct vmw_resource *ctx = sw_context->cur_ctx;
335
336 if (list_empty(&ctx->query_head))
337 list_add_tail(&ctx->query_head,
338 &sw_context->query_list);
339 ret = vmw_bo_to_validate_list(sw_context,
340 dev_priv->dummy_query_bo,
341 DRM_VMW_FENCE_FLAG_EXEC,
342 NULL);
343 if (unlikely(ret != 0))
344 return ret;
345 }
346 return 0;
347}
348
349
350/**
351 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
352 *
353 * @dev_priv: The device private structure.
354 * @sw_context: The software context used for this command submission batch.
355 *
356 * This function will check if we're switching query buffers, and will then,
357 * if no other query waits are issued this command submission batch,
358 * issue a dummy occlusion query wait used as a query barrier. When the fence
359 * object following that query wait has signaled, we are sure that all
360 * preceding queries have finished, and the old query buffer can be unpinned.
361 * However, since both the new query buffer and the old one are fenced with
362 * that fence, we can do an asynchronous unpin now, and be sure that the
363 * old query buffer won't be moved until the fence has signaled.
364 *
365 * As mentioned above, both the new and old query buffers need to be fenced
366 * using a sequence emitted *after* calling this function.
367 */
368static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
369 struct vmw_sw_context *sw_context)
370{
371
372 struct vmw_resource *ctx, *next_ctx;
373 int ret;
374
375 /*
376 * The validate list should still hold references to all
377 * contexts here.
378 */
379
380 list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
381 query_head) {
382 list_del_init(&ctx->query_head);
383
384 BUG_ON(list_empty(&ctx->validate_head));
385
386 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
387
388 if (unlikely(ret != 0))
389 DRM_ERROR("Out of fifo space for dummy query.\n");
390 }
391
392 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
393 if (dev_priv->pinned_bo) {
394 vmw_bo_pin(dev_priv->pinned_bo, false);
395 ttm_bo_unref(&dev_priv->pinned_bo);
396 }
397
398 vmw_bo_pin(sw_context->cur_query_bo, true);
399
400 /*
401 * We pin also the dummy_query_bo buffer so that we
402 * don't need to validate it when emitting
403 * dummy queries in context destroy paths.
404 */
405
406 vmw_bo_pin(dev_priv->dummy_query_bo, true);
407 dev_priv->dummy_query_bo_pinned = true;
408
409 dev_priv->query_cid = sw_context->cur_query_cid;
410 dev_priv->pinned_bo =
411 ttm_bo_reference(sw_context->cur_query_bo);
412 }
413}
414
415/**
416 * vmw_query_switch_backoff - clear query barrier list
417 * @sw_context: The sw context used for this submission batch.
418 *
419 * This function is used as part of an error path, where a previously
420 * set up list of query barriers needs to be cleared.
421 *
422 */
423static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
424{
425 struct list_head *list, *next;
426
427 list_for_each_safe(list, next, &sw_context->query_list) {
428 list_del_init(list);
429 }
430}
431
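/*
 * vmw_translate_guest_ptr - Look up the DMA buffer backing a guest pointer,
 * add it to the validate list and record a relocation so that the gmrId /
 * offset can be patched once the buffer has been placed. On success a
 * reference to the buffer is returned in @vmw_bo_p.
 */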
432static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
433 struct vmw_sw_context *sw_context,
434 SVGAGuestPtr *ptr,
435 struct vmw_dma_buffer **vmw_bo_p)
436{
437 struct vmw_dma_buffer *vmw_bo = NULL;
438 struct ttm_buffer_object *bo;
439 uint32_t handle = ptr->gmrId;
440 struct vmw_relocation *reloc;
441 int ret;
442
443 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
444 if (unlikely(ret != 0)) {
445 DRM_ERROR("Could not find or use GMR region.\n");
446 return -EINVAL;
447 }
448 bo = &vmw_bo->base;
449
450 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
451 DRM_ERROR("Max number of relocations per submission"
452 " exceeded\n");
453 ret = -EINVAL;
454 goto out_no_reloc;
455 }
456
457 reloc = &sw_context->relocs[sw_context->cur_reloc++];
458 reloc->location = ptr;
459
460 ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
461 &reloc->index);
462 if (unlikely(ret != 0))
463 goto out_no_reloc;
464
465 *vmw_bo_p = vmw_bo;
466 return 0;
467
468out_no_reloc:
469 vmw_dmabuf_unreference(&vmw_bo);
470 *vmw_bo_p = NULL;
471 return ret;
472}
473
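/*
 * vmw_cmd_end_query - Validate an SVGA3D_CMD_END_QUERY command and prepare
 * switching of the pinned query buffer to the buffer referenced by the
 * command's guest pointer.
 */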
474static int vmw_cmd_end_query(struct vmw_private *dev_priv,
475 struct vmw_sw_context *sw_context,
476 SVGA3dCmdHeader *header)
477{
478 struct vmw_dma_buffer *vmw_bo;
479 struct vmw_query_cmd {
480 SVGA3dCmdHeader header;
481 SVGA3dCmdEndQuery q;
482 } *cmd;
483 int ret;
484
485 cmd = container_of(header, struct vmw_query_cmd, header);
486 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
487 if (unlikely(ret != 0))
488 return ret;
489
490 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
491 &cmd->q.guestResult,
492 &vmw_bo);
493 if (unlikely(ret != 0))
494 return ret;
495
496 ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
497 &vmw_bo->base, sw_context);
498
499 vmw_dmabuf_unreference(&vmw_bo);
500 return ret;
501}
502
503static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
504 struct vmw_sw_context *sw_context,
505 SVGA3dCmdHeader *header)
506{
507 struct vmw_dma_buffer *vmw_bo;
508 struct vmw_query_cmd {
509 SVGA3dCmdHeader header;
510 SVGA3dCmdWaitForQuery q;
511 } *cmd;
512 int ret;
513 struct vmw_resource *ctx;
514
515 cmd = container_of(header, struct vmw_query_cmd, header);
516 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
517 if (unlikely(ret != 0))
518 return ret;
519
520 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
521 &cmd->q.guestResult,
522 &vmw_bo);
523 if (unlikely(ret != 0))
524 return ret;
525
526 vmw_dmabuf_unreference(&vmw_bo);
527
528 /*
529 * This wait will act as a barrier for previous waits for this
530 * context.
531 */
532
533 ctx = sw_context->cur_ctx;
534 if (!list_empty(&ctx->query_head))
535 list_del_init(&ctx->query_head);
536
537 return 0;
538}
539
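/*
 * vmw_cmd_dma - Validate an SVGA3D_CMD_SURFACE_DMA command: translate the
 * guest pointer, look up the destination surface, patch the command with the
 * device surface id, let the cursor code snoop the transfer and add the
 * surface to the validate list.
 */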
540static int vmw_cmd_dma(struct vmw_private *dev_priv,
541 struct vmw_sw_context *sw_context,
542 SVGA3dCmdHeader *header)
543{
544 struct vmw_dma_buffer *vmw_bo = NULL;
545 struct ttm_buffer_object *bo;
546 struct vmw_surface *srf = NULL;
547 struct vmw_dma_cmd {
548 SVGA3dCmdHeader header;
549 SVGA3dCmdSurfaceDMA dma;
550 } *cmd;
551 int ret;
552 struct vmw_resource *res;
553
554 cmd = container_of(header, struct vmw_dma_cmd, header);
555 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
556 &cmd->dma.guest.ptr,
557 &vmw_bo);
558 if (unlikely(ret != 0))
559 return ret;
560
561 bo = &vmw_bo->base;
562 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
563 cmd->dma.host.sid, &srf);
564 if (ret) {
565 DRM_ERROR("could not find surface\n");
566 goto out_no_reloc;
567 }
568
569 /*
570 * Patch command stream with device SID.
571 */
572 cmd->dma.host.sid = srf->res.id;
573 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
574
575 vmw_dmabuf_unreference(&vmw_bo);
576
577 res = &srf->res;
578 vmw_resource_to_validate_list(sw_context, &res);
579
580 return 0;
581
582out_no_reloc:
583 vmw_dmabuf_unreference(&vmw_bo);
584 return ret;
585}
586
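/*
 * vmw_cmd_draw - Validate an SVGA3D_CMD_DRAW_PRIMITIVES command, bounds-
 * checking the vertex declaration and primitive range arrays against the
 * command size and translating every referenced surface id.
 */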
587static int vmw_cmd_draw(struct vmw_private *dev_priv,
588 struct vmw_sw_context *sw_context,
589 SVGA3dCmdHeader *header)
590{
591 struct vmw_draw_cmd {
592 SVGA3dCmdHeader header;
593 SVGA3dCmdDrawPrimitives body;
594 } *cmd;
595 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
596 (unsigned long)header + sizeof(*cmd));
597 SVGA3dPrimitiveRange *range;
598 uint32_t i;
599 uint32_t maxnum;
600 int ret;
601
602 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
603 if (unlikely(ret != 0))
604 return ret;
605
606 cmd = container_of(header, struct vmw_draw_cmd, header);
607 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
608
609 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
610 DRM_ERROR("Illegal number of vertex declarations.\n");
611 return -EINVAL;
612 }
613
614 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
615 ret = vmw_cmd_sid_check(dev_priv, sw_context,
616 &decl->array.surfaceId);
617 if (unlikely(ret != 0))
618 return ret;
619 }
620
621 maxnum = (header->size - sizeof(cmd->body) -
622 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
623 if (unlikely(cmd->body.numRanges > maxnum)) {
624 DRM_ERROR("Illegal number of index ranges.\n");
625 return -EINVAL;
626 }
627
628 range = (SVGA3dPrimitiveRange *) decl;
629 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
630 ret = vmw_cmd_sid_check(dev_priv, sw_context,
631 &range->indexArray.surfaceId);
632 if (unlikely(ret != 0))
633 return ret;
634 }
635 return 0;
636}
637
638
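/*
 * vmw_cmd_tex_state - Validate an SVGA3D_CMD_SETTEXTURESTATE command,
 * translating the surface id of every SVGA3D_TS_BIND_TEXTURE state entry.
 */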
639static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
640 struct vmw_sw_context *sw_context,
641 SVGA3dCmdHeader *header)
642{
643 struct vmw_tex_state_cmd {
644 SVGA3dCmdHeader header;
645 SVGA3dCmdSetTextureState state;
646 };
647
648 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
649 ((unsigned long) header + header->size + sizeof(header));
650 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
651 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
652 int ret;
653
654 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
655 if (unlikely(ret != 0))
656 return ret;
657
658 for (; cur_state < last_state; ++cur_state) {
659 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
660 continue;
661
662 ret = vmw_cmd_sid_check(dev_priv, sw_context,
663 &cur_state->value);
664 if (unlikely(ret != 0))
665 return ret;
666 }
667
668 return 0;
669}
670
671static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
672 struct vmw_sw_context *sw_context,
673 void *buf)
674{
675 struct vmw_dma_buffer *vmw_bo;
676 int ret;
677
678 struct {
679 uint32_t header;
680 SVGAFifoCmdDefineGMRFB body;
681 } *cmd = buf;
682
683 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
684 &cmd->body.ptr,
685 &vmw_bo);
686 if (unlikely(ret != 0))
687 return ret;
688
689 vmw_dmabuf_unreference(&vmw_bo);
690
691 return ret;
692}
693
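/*
 * vmw_cmd_check_not_3d - Verify a 2D SVGA fifo command: determine its size
 * from the command id, check it against the remaining batch size, restrict
 * it to kernel submissions and validate the GMRFB pointer where applicable.
 */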
694static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
695 struct vmw_sw_context *sw_context,
696 void *buf, uint32_t *size)
697{
698 uint32_t size_remaining = *size;
699 uint32_t cmd_id;
700
701 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
702 switch (cmd_id) {
703 case SVGA_CMD_UPDATE:
704 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
705 break;
706 case SVGA_CMD_DEFINE_GMRFB:
707 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
708 break;
709 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
710 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
711 break;
712 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
713 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
714 break;
715 default:
716 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
717 return -EINVAL;
718 }
719
720 if (*size > size_remaining) {
721 DRM_ERROR("Invalid SVGA command (size mismatch):"
722 " %u.\n", cmd_id);
723 return -EINVAL;
724 }
725
726 if (unlikely(!sw_context->kernel)) {
727 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
728 return -EPERM;
729 }
730
731 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
732 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
733
734 return 0;
735}
736
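/*
 * 3D command dispatch: vmw_cmd_funcs is indexed by the command id relative
 * to SVGA_3D_CMD_BASE. For example,
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma)
 *
 * expands to
 *
 *	[SVGA_3D_CMD_SURFACE_DMA - SVGA_3D_CMD_BASE] = &vmw_cmd_dma
 *
 * so vmw_cmd_check() can call the verifier as
 * vmw_cmd_funcs[cmd_id - SVGA_3D_CMD_BASE].
 */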
737typedef int (*vmw_cmd_func) (struct vmw_private *,
738 struct vmw_sw_context *,
739 SVGA3dCmdHeader *);
740
741#define VMW_CMD_DEF(cmd, func) \
742 [cmd - SVGA_3D_CMD_BASE] = func
743
744static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
745 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
746 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
747 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
748 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
749 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
750 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
751 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
752 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
753 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
754 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
755 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
756 &vmw_cmd_set_render_target_check),
757 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
758 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
759 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
760 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
761 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
762 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
763 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
764 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
765 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
766 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
767 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
768 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
769 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
770 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
771 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
772 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
773 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
774 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
775 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
776 &vmw_cmd_blt_surf_screen_check)
777};
778
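/*
 * vmw_cmd_check - Verify a single command in the batch: dispatch 2D commands
 * to vmw_cmd_check_not_3d(), otherwise size-check the SVGA3D header and call
 * the per-command verifier from vmw_cmd_funcs. On return *size holds the
 * full size of the checked command.
 */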
779static int vmw_cmd_check(struct vmw_private *dev_priv,
780 struct vmw_sw_context *sw_context,
781 void *buf, uint32_t *size)
782{
783 uint32_t cmd_id;
784 uint32_t size_remaining = *size;
785 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
786 int ret;
787
788 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
789 /* Handle any non-3D commands */
790 if (unlikely(cmd_id < SVGA_CMD_MAX))
791 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
792
793
794 cmd_id = le32_to_cpu(header->id);
795 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
796
797 cmd_id -= SVGA_3D_CMD_BASE;
798 if (unlikely(*size > size_remaining))
799 goto out_err;
800
801 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
802 goto out_err;
803
804 ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
805 if (unlikely(ret != 0))
806 goto out_err;
807
808 return 0;
809out_err:
810 DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
811 cmd_id + SVGA_3D_CMD_BASE);
812 return -EINVAL;
813}
814
815static int vmw_cmd_check_all(struct vmw_private *dev_priv,
816 struct vmw_sw_context *sw_context,
817 void *buf,
818 uint32_t size)
819{
820 int32_t cur_size = size;
821 int ret;
822
823 while (cur_size > 0) {
824 size = cur_size;
825 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
826 if (unlikely(ret != 0))
827 return ret;
828 buf = (void *)((unsigned long) buf + size);
829 cur_size -= size;
830 }
831
832 if (unlikely(cur_size != 0)) {
833 DRM_ERROR("Command verifier out of sync.\n");
834 return -EINVAL;
835 }
836
837 return 0;
838}
839
840static void vmw_free_relocations(struct vmw_sw_context *sw_context)
841{
842 sw_context->cur_reloc = 0;
843}
844
845static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
846{
847 uint32_t i;
848 struct vmw_relocation *reloc;
849 struct ttm_validate_buffer *validate;
850 struct ttm_buffer_object *bo;
851
852 for (i = 0; i < sw_context->cur_reloc; ++i) {
853 reloc = &sw_context->relocs[i];
854 validate = &sw_context->val_bufs[reloc->index];
855 bo = validate->bo;
856 if (bo->mem.mem_type == TTM_PL_VRAM) {
857 reloc->location->offset += bo->offset;
858 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
859 } else
860 reloc->location->gmrId = bo->mem.start;
861 }
862 vmw_free_relocations(sw_context);
863}
864
865static void vmw_clear_validations(struct vmw_sw_context *sw_context)
866{
867 struct ttm_validate_buffer *entry, *next;
868 struct vmw_resource *res, *res_next;
869
870 /*
871 * Drop references to DMA buffers held during command submission.
872 */
873 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
874 head) {
875 list_del(&entry->head);
876 vmw_dmabuf_validate_clear(entry->bo);
877 ttm_bo_unref(&entry->bo);
878 sw_context->cur_val_buf--;
879 }
880 BUG_ON(sw_context->cur_val_buf != 0);
881
882 /*
883 * Drop references to resources held during command submission.
884 */
885 list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
886 validate_head) {
887 list_del_init(&res->validate_head);
888 vmw_resource_unreference(&res);
889 }
890}
891
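/*
 * vmw_validate_single_buffer - Place a buffer for command submission,
 * preferring VRAM or a GMR and falling back to evicting VRAM contents.
 * Pinned query buffers are left where they are.
 */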
892static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
893 struct ttm_buffer_object *bo)
894{
895 int ret;
896
897
898 /*
899 * Don't validate pinned buffers.
900 */
901
902 if (bo == dev_priv->pinned_bo ||
903 (bo == dev_priv->dummy_query_bo &&
904 dev_priv->dummy_query_bo_pinned))
905 return 0;
906
907 /**
908 * Put BO in VRAM if there is space, otherwise as a GMR.
909 * If there is no space in VRAM and GMR ids are all used up,
910 * start evicting GMRs to make room. If the DMA buffer can't be
911 * used as a GMR, this will return -ENOMEM.
912 */
913
914 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
915 if (likely(ret == 0 || ret == -ERESTARTSYS))
916 return ret;
917
918 /**
919 * If that failed, try VRAM again, this time evicting
920 * previous contents.
921 */
922
923 DRM_INFO("Falling through to VRAM.\n");
924 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
925 return ret;
926}
927
928
929static int vmw_validate_buffers(struct vmw_private *dev_priv,
930 struct vmw_sw_context *sw_context)
931{
932 struct ttm_validate_buffer *entry;
933 int ret;
934
935 list_for_each_entry(entry, &sw_context->validate_nodes, head) {
936 ret = vmw_validate_single_buffer(dev_priv, entry->bo);
937 if (unlikely(ret != 0))
938 return ret;
939 }
940 return 0;
941}
942
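/*
 * vmw_resize_cmd_bounce - Grow the bounce buffer used for copying user-space
 * command batches, if needed. The size grows in page-aligned steps of about
 * 1.5x to limit reallocations.
 */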
943static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
944 uint32_t size)
945{
946 if (likely(sw_context->cmd_bounce_size >= size))
947 return 0;
948
949 if (sw_context->cmd_bounce_size == 0)
950 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
951
952 while (sw_context->cmd_bounce_size < size) {
953 sw_context->cmd_bounce_size =
954 PAGE_ALIGN(sw_context->cmd_bounce_size +
955 (sw_context->cmd_bounce_size >> 1));
956 }
957
958 if (sw_context->cmd_bounce != NULL)
959 vfree(sw_context->cmd_bounce);
960
961 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
962
963 if (sw_context->cmd_bounce == NULL) {
964 DRM_ERROR("Failed to allocate command bounce buffer.\n");
965 sw_context->cmd_bounce_size = 0;
966 return -ENOMEM;
967 }
968
969 return 0;
970}
971
972/**
973 * vmw_execbuf_fence_commands - create and submit a command stream fence
974 *
975 * Creates a fence object and submits a command stream marker.
976 * If this fails for some reason, we sync the fifo and return NULL.
977 * It is then safe to fence buffers with a NULL pointer.
978 *
979 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates
980 * a userspace handle if @p_handle is not NULL, otherwise not.
981 */
982
983int vmw_execbuf_fence_commands(struct drm_file *file_priv,
984 struct vmw_private *dev_priv,
985 struct vmw_fence_obj **p_fence,
986 uint32_t *p_handle)
987{
988 uint32_t sequence;
989 int ret;
990 bool synced = false;
991
992 /* p_handle implies file_priv. */
993 BUG_ON(p_handle != NULL && file_priv == NULL);
994
995 ret = vmw_fifo_send_fence(dev_priv, &sequence);
996 if (unlikely(ret != 0)) {
997 DRM_ERROR("Fence submission error. Syncing.\n");
998 synced = true;
999 }
1000
1001 if (p_handle != NULL)
1002 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
1003 sequence,
1004 DRM_VMW_FENCE_FLAG_EXEC,
1005 p_fence, p_handle);
1006 else
1007 ret = vmw_fence_create(dev_priv->fman, sequence,
1008 DRM_VMW_FENCE_FLAG_EXEC,
1009 p_fence);
1010
1011 if (unlikely(ret != 0 && !synced)) {
1012 (void) vmw_fallback_wait(dev_priv, false, false,
1013 sequence, false,
1014 VMW_FENCE_WAIT_TIMEOUT);
1015 *p_fence = NULL;
1016 }
1017
1018 return 0;
1019}
1020
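/*
 * vmw_execbuf_process - Main command submission path. Copies a user-space
 * batch to the bounce buffer (unless kernel commands are passed in), verifies
 * and patches all commands, reserves and validates the buffers involved,
 * applies relocations, optionally throttles, copies the batch to the fifo
 * and finally fences the submission, optionally returning a fence handle to
 * user-space in @user_fence_rep.
 */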
1021int vmw_execbuf_process(struct drm_file *file_priv,
1022 struct vmw_private *dev_priv,
1023 void __user *user_commands,
1024 void *kernel_commands,
1025 uint32_t command_size,
1026 uint64_t throttle_us,
1027 struct drm_vmw_fence_rep __user *user_fence_rep)
1028{
1029 struct vmw_sw_context *sw_context = &dev_priv->ctx;
1030 struct drm_vmw_fence_rep fence_rep;
1031 struct vmw_fence_obj *fence;
1032 uint32_t handle;
1033 void *cmd;
1034 int ret;
1035
1036 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
1037 if (unlikely(ret != 0))
1038 return -ERESTARTSYS;
1039
1040 if (kernel_commands == NULL) {
1041 sw_context->kernel = false;
1042
1043 ret = vmw_resize_cmd_bounce(sw_context, command_size);
1044 if (unlikely(ret != 0))
1045 goto out_unlock;
1046
1047
1048 ret = copy_from_user(sw_context->cmd_bounce,
1049 user_commands, command_size);
1050
1051 if (unlikely(ret != 0)) {
1052 ret = -EFAULT;
1053 DRM_ERROR("Failed copying commands.\n");
1054 goto out_unlock;
1055 }
1056 kernel_commands = sw_context->cmd_bounce;
1057 } else
1058 sw_context->kernel = true;
1059
1060 sw_context->tfile = vmw_fpriv(file_priv)->tfile;
1061 sw_context->cid_valid = false;
1062 sw_context->sid_valid = false;
1063 sw_context->cur_reloc = 0;
1064 sw_context->cur_val_buf = 0;
1065 sw_context->fence_flags = 0;
1066 INIT_LIST_HEAD(&sw_context->query_list);
1067 INIT_LIST_HEAD(&sw_context->resource_list);
1068 sw_context->cur_query_bo = dev_priv->pinned_bo;
1069 sw_context->cur_query_cid = dev_priv->query_cid;
1070 sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
1071
1072 INIT_LIST_HEAD(&sw_context->validate_nodes);
1073
1074 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
1075 command_size);
1076 if (unlikely(ret != 0))
1077 goto out_err;
1078
1079 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
1080 if (unlikely(ret != 0))
1081 goto out_err;
1082
1083 ret = vmw_validate_buffers(dev_priv, sw_context);
1084 if (unlikely(ret != 0))
1085 goto out_err;
1086
1087 vmw_apply_relocations(sw_context);
1088
1089 if (throttle_us) {
1090 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
1091 throttle_us);
1092
1093 if (unlikely(ret != 0))
1094 goto out_throttle;
1095 }
1096
1097 cmd = vmw_fifo_reserve(dev_priv, command_size);
1098 if (unlikely(cmd == NULL)) {
1099 DRM_ERROR("Failed reserving fifo space for commands.\n");
1100 ret = -ENOMEM;
1101 goto out_throttle;
1102 }
1103
1104 memcpy(cmd, kernel_commands, command_size);
1105 vmw_fifo_commit(dev_priv, command_size);
1106
1107 vmw_query_bo_switch_commit(dev_priv, sw_context);
1108 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1109 &fence,
1110 (user_fence_rep) ? &handle : NULL);
1111 /*
1112 * This error is harmless, because if fence submission fails,
1113 * vmw_fifo_send_fence will sync. The error will be propagated to
1114 * user-space in @fence_rep
1115 */
1116
1117 if (ret != 0)
1118 DRM_ERROR("Fence submission error. Syncing.\n");
1119
1120 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
1121 (void *) fence);
1122
1123 vmw_clear_validations(sw_context);
1124
1125 if (user_fence_rep) {
1126 fence_rep.error = ret;
1127 fence_rep.handle = handle;
1128 fence_rep.seqno = fence->seqno;
1129 vmw_update_seqno(dev_priv, &dev_priv->fifo);
1130 fence_rep.passed_seqno = dev_priv->last_read_seqno;
1131
1132 /*
1133 * copy_to_user errors will be detected by user space not
1134 * seeing fence_rep::error filled in. Typically
1135 * user-space would have pre-set that member to -EFAULT.
1136 */
1137 ret = copy_to_user(user_fence_rep, &fence_rep,
1138 sizeof(fence_rep));
1139
1140 /*
1141 * User-space lost the fence object. We need to sync
1142 * and unreference the handle.
1143 */
1144 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
1145 BUG_ON(fence == NULL);
1146
1147 ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1148 handle, TTM_REF_USAGE);
1149 DRM_ERROR("Fence copy error. Syncing.\n");
1150 (void) vmw_fence_obj_wait(fence,
1151 fence->signal_mask,
1152 false, false,
1153 VMW_FENCE_WAIT_TIMEOUT);
1154 }
1155 }
1156
1157 if (likely(fence != NULL))
1158 vmw_fence_obj_unreference(&fence);
1159
1160 mutex_unlock(&dev_priv->cmdbuf_mutex);
1161 return 0;
1162
1163out_err:
1164 vmw_free_relocations(sw_context);
1165out_throttle:
1166 vmw_query_switch_backoff(sw_context);
1167 ttm_eu_backoff_reservation(&sw_context->validate_nodes);
1168 vmw_clear_validations(sw_context);
1169out_unlock:
1170 mutex_unlock(&dev_priv->cmdbuf_mutex);
1171 return ret;
1172}
1173
1174/**
1175 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
1176 *
1177 * @dev_priv: The device private structure.
1178 *
1179 * This function is called to idle the fifo and unpin the query buffer
1180 * if the normal way to do this hits an error, which should typically be
1181 * extremely rare.
1182 */
1183static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
1184{
1185 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
1186
1187 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
1188 vmw_bo_pin(dev_priv->pinned_bo, false);
1189 vmw_bo_pin(dev_priv->dummy_query_bo, false);
1190 dev_priv->dummy_query_bo_pinned = false;
1191}
1192
1193
1194/**
1195 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
1196 * query bo.
1197 *
1198 * @dev_priv: The device private structure.
1199 * @only_on_cid_match: Only flush and unpin if the current active query cid
1200 * matches @cid.
1201 * @cid: Optional context id to match.
1202 *
1203 * This function should be used to unpin the pinned query bo, or
1204 * as a query barrier when we need to make sure that all queries have
1205 * finished before the next fifo command. (For example on hardware
1206 * context destructions where the hardware may otherwise leak unfinished
1207 * queries).
1208 *
1209 * This function does not return any failure codes, but makes attempts
1210 * to do safe unpinning in case of errors.
1211 *
1212 * The function will synchronize on the previous query barrier, and will
1213 * thus not finish until that barrier has executed.
1214 */
1215void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1216 bool only_on_cid_match, uint32_t cid)
1217{
1218 int ret = 0;
1219 struct list_head validate_list;
1220 struct ttm_validate_buffer pinned_val, query_val;
1221 struct vmw_fence_obj *fence;
1222
1223 mutex_lock(&dev_priv->cmdbuf_mutex);
1224
1225 if (dev_priv->pinned_bo == NULL)
1226 goto out_unlock;
1227
1228 if (only_on_cid_match && cid != dev_priv->query_cid)
1229 goto out_unlock;
1230
1231 INIT_LIST_HEAD(&validate_list);
1232
1233 pinned_val.new_sync_obj_arg = (void *)(unsigned long)
1234 DRM_VMW_FENCE_FLAG_EXEC;
1235 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
1236 list_add_tail(&pinned_val.head, &validate_list);
1237
1238 query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
1239 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
1240 list_add_tail(&query_val.head, &validate_list);
1241
1242 do {
1243 ret = ttm_eu_reserve_buffers(&validate_list);
1244 } while (ret == -ERESTARTSYS);
1245
1246 if (unlikely(ret != 0)) {
1247 vmw_execbuf_unpin_panic(dev_priv);
1248 goto out_no_reserve;
1249 }
1250
1251 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
1252 if (unlikely(ret != 0)) {
1253 vmw_execbuf_unpin_panic(dev_priv);
1254 goto out_no_emit;
1255 }
1256
1257 vmw_bo_pin(dev_priv->pinned_bo, false);
1258 vmw_bo_pin(dev_priv->dummy_query_bo, false);
1259 dev_priv->dummy_query_bo_pinned = false;
1260
1261 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1262 ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
1263
1264 ttm_bo_unref(&query_val.bo);
1265 ttm_bo_unref(&pinned_val.bo);
1266 ttm_bo_unref(&dev_priv->pinned_bo);
1267
1268out_unlock:
1269 mutex_unlock(&dev_priv->cmdbuf_mutex);
1270 return;
1271
1272out_no_emit:
1273 ttm_eu_backoff_reservation(&validate_list);
1274out_no_reserve:
1275 ttm_bo_unref(&query_val.bo);
1276 ttm_bo_unref(&pinned_val.bo);
1277 ttm_bo_unref(&dev_priv->pinned_bo);
1278 mutex_unlock(&dev_priv->cmdbuf_mutex);
1279}
1280
1281
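/*
 * vmw_execbuf_ioctl - DRM_VMW_EXECBUF entry point. Checks the ioctl version,
 * takes the read lock on the master ttm lock and hands the user-space command
 * batch to vmw_execbuf_process().
 */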
1282int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
1283 struct drm_file *file_priv)
1284{
1285 struct vmw_private *dev_priv = vmw_priv(dev);
1286 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
1287 struct vmw_master *vmaster = vmw_master(file_priv->master);
1288 int ret;
1289
1290 /*
1291 * This will allow us to extend the ioctl argument while
1292 * maintaining backwards compatibility:
1293 * We take different code paths depending on the value of
1294 * arg->version.
1295 */
1296
1297 if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
1298 DRM_ERROR("Incorrect execbuf version.\n");
1299 DRM_ERROR("You're running outdated experimental "
1300 "vmwgfx user-space drivers.");
1301 return -EINVAL;
1302 }
1303
1304 ret = ttm_read_lock(&vmaster->lock, true);
1305 if (unlikely(ret != 0))
1306 return ret;
1307
1308 ret = vmw_execbuf_process(file_priv, dev_priv,
1309 (void __user *)(unsigned long)arg->commands,
1310 NULL, arg->command_size, arg->throttle_us,
1311 (void __user *)(unsigned long)arg->fence_rep);
1312
1313 if (unlikely(ret != 0))
1314 goto out_unlock;
1315
1316 vmw_kms_cursor_post_execbuf(dev_priv);
1317
1318out_unlock:
1319 ttm_read_unlock(&vmaster->lock);
1320 return ret;
1321}