// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

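/*
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: The TTM base object backing the user-space handle.
 * @res: The context resource.
 * @cbs: Context binding state tracked for this context.
 * @man: Manager for command-buffer managed resources of this context.
 * @cotables: Cotable resources. Only used by DX contexts.
 * @cotable_lock: Protects @cotables against concurrent lookup and teardown.
 * @dx_query_mob: Buffer object holding DX query results, if any is bound.
 */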
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
	spinlock_t cotable_lock;
	struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

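/*
 * vmw_context_cotables_unref - Drop the context's references on its cotables.
 *
 * Each array slot is cleared under @cotable_lock before the reference is
 * released, so that vmw_dx_context_scrub_cotables() cannot pick up a
 * cotable that is going away.
 */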
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
				       struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;
	u32 cotable_max = has_sm5_context(dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	for (i = 0; i < cotable_max; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

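/*
 * vmw_hw_context_destroy - Destroy the device context.
 *
 * Guest-backed and DX contexts are destroyed through their res_func
 * destroy callbacks with the command-buffer and binding mutexes held;
 * legacy contexts are destroyed with an inline SVGA_3D_CMD_CONTEXT_DESTROY
 * command.
 */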
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(dev_priv, uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    sizeof(SVGAGBContextData));
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		u32 cotable_max = has_sm5_context(dev_priv) ?
			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
		for (i = 0; i < cotable_max; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(dev_priv, uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

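/*
 * vmw_context_init - Initialize a context resource.
 *
 * On devices with MOB support this dispatches to vmw_gb_context_init();
 * otherwise a legacy context is defined directly in the command stream
 * with SVGA_3D_CMD_CONTEXT_DEFINE.
 */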
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}


/*
 * GB context.
 */

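/*
 * vmw_gb_context_create - Allocate a device id for the context and issue
 * a define command, unless the context already has an id.
 */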
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

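/*
 * vmw_gb_context_unbind - Scrub the context's bindings, then unbind the
 * context from its MOB, optionally preceding the unbind with a readback
 * command so the device dumps the context state to the backup buffer first.
 */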
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));


	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first, so
 * that binding scrubbing doesn't have to be done later with an invalid
 * context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < cotable_max; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

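/*
 * vmw_dx_context_unbind - DX counterpart of vmw_gb_context_unbind(). In
 * addition to scrubbing cotables and bindings, on readback any pending
 * query states are first read back to the bound query MOB.
 */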
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

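/*
 * vmw_context_destroy_ioctl - Drop the file's reference on the context
 * handle; the context itself goes away once all references are released.
 */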
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid);
}

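/*
 * vmw_context_define - Back end for the context define ioctls. Allocates a
 * vmw_user_context, initializes the resource and registers a TTM base
 * object so that user space gets a handle back in @arg->cid.
 */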
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!has_sm4_context(dev_priv) && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

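/**
 * vmw_context_cotable - Look up a cotable of a DX context.
 *
 * @ctx: The context resource.
 * @cotable_type: The cotable type to look up.
 *
 * Returns a non-refcounted pointer to the cotable resource, or an
 * ERR_PTR if @cotable_type is out of range for the device's SM level.
 */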
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	if (cotable_type >= cotable_max)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_buffer_object *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}