// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>
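
/**
 * vmw_du_init - initialize a display unit
 * @du: display unit to initialize
 *
 * Sets up the vkms state tracking for the unit's crtc.
 */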
void vmw_du_init(struct vmw_display_unit *du)
{
	vmw_vkms_crtc_init(&du->crtc);
}
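
/**
 * vmw_du_cleanup - tear down a display unit
 * @du: display unit to clean up
 *
 * Releases the planes, connector, crtc and encoder that make up
 * the display unit.
 */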
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	vmw_vkms_crtc_cleanup(&du->crtc);
	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * under the risk of clogging other fifocmd users, so
	 * we treat reservations separately from the way we treat
	 * other fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct vmw_plane_state *vps,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	if (vps->cursor.bo)
		vmw_cursor_update_mob(dev_priv, vps, image,
				      vps->base.crtc_w, vps->base.crtc_h,
				      hotspotX, hotspotY);
	else
		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
					   hotspotX, hotspotY);
}

/**
 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor-image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.bo);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);
	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
		  vps->cursor.bo->tbo.resource->start);
}
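
/**
 * vmw_du_cursor_mob_size - cursor MOB size for the given dimensions
 * @w: cursor width in pixels
 * @h: cursor height in pixels
 *
 * Returns the number of bytes needed to hold a w by h 32-bpp cursor
 * image plus its SVGAGBCursorHeader.
 */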
static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}

/**
 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
	struct vmw_surface *surf;

	if (vmw_user_object_is_null(&vps->uo))
		return NULL;

	surf = vmw_user_object_surface(&vps->uo);
	if (surf && !vmw_user_object_is_mapped(&vps->uo))
		return surf->snooper.image;

	return vmw_user_object_map(&vps->uo);
}
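
/**
 * vmw_du_cursor_plane_has_changed - check whether the cursor needs re-defining
 * @old_vps: the previous cursor plane state
 * @new_vps: the new cursor plane state
 *
 * Returns true if the cursor dimensions, hotspot or image contents differ
 * between the two states.
 */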
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
					    struct vmw_plane_state *new_vps)
{
	void *old_image;
	void *new_image;
	u32 size;
	bool changed;

	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
		return true;

	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
	new_image = vmw_du_cursor_plane_acquire_image(new_vps);

	changed = false;
	if (old_image && new_image && old_image != new_image)
		changed = memcmp(old_image, new_image, size) != 0;

	return changed;
}

static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}
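
/**
 * vmw_du_put_cursor_mob - return a cursor mob to the plane's cache
 * @vcp: cursor plane owning the mob cache
 * @vps: plane state holding the mob to return
 *
 * Caches the mob in a free slot, replaces a smaller cached mob, or
 * destroys it outright if it is not worth caching.
 */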
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
				  struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.bo)
		return;

	vmw_du_cursor_plane_unmap_cm(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.bo->tbo.base.size) {
			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}
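
/**
 * vmw_du_get_cursor_mob - provide a cursor mob for the plane state
 * @vcp: cursor plane owning the mob cache
 * @vps: plane state that needs a mob
 *
 * Reuses the current or a cached mob when it is large enough, otherwise
 * creates a new one and fences the creation so the mob is guaranteed to
 * exist before it is used.
 *
 * Returns 0 on success, negative error code otherwise.
 */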
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
				 struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	struct vmw_fence_obj *fence = NULL;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.bo) {
		if (vps->cursor.bo->tbo.base.size >= size)
			return 0;
		vmw_du_put_cursor_mob(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.bo = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);

	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (ret != 0) {
		ttm_bo_unreserve(&vps->cursor.bo->tbo);
		goto teardown;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&vps->cursor.bo->tbo);
	return 0;

teardown:
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
	return ret;
}
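
/**
 * vmw_cursor_update_position - update the cursor position and visibility
 * @dev_priv: the private driver struct
 * @show: whether the cursor should be shown or hidden
 * @x: new cursor x coordinate
 * @y: new cursor y coordinate
 *
 * Uses the extra cursor registers, the cursor bypass 3 FIFO registers or
 * the legacy registers, depending on what the device supports.
 */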
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}
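
/**
 * vmw_kms_cursor_snoop - snoop cursor image data from a surface DMA command
 * @srf: surface that carries the snooper
 * @tfile: the ttm object file of the caller
 * @bo: buffer object that is the source of the DMA
 * @header: header of the surface DMA command
 *
 * Copies the cursor image being DMA'd to the surface into the surface's
 * snooper image so the host cursor can be kept up to date.
 */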
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}
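
/**
 * vmw_kms_cursor_post_execbuf - redefine snooped cursors after command submission
 * @dev_priv: the private driver struct
 *
 * Re-sends the define-cursor command for every display unit whose snooped
 * cursor image has changed since the last update.
 */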
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age ||
		    !du->cursor_surface->snooper.image)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_send_define_cursor_cmd(dev_priv,
					   du->cursor_surface->snooper.image,
					   VMW_CURSOR_SNOOP_WIDTH,
					   VMW_CURSOR_SNOOP_HEIGHT,
					   du->hotspot_x + du->core_hotspot_x,
					   du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}

/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
{
	struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);

	if (surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&surf->res);
			vps->pinned--;
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps);
}

/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	struct ttm_buffer_object *bo;

	if (!vps->cursor.bo)
		return -EINVAL;

	bo = &vps->cursor.bo->tbo;

	if (bo->base.size < size)
		return -EINVAL;

	if (vps->cursor.bo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vps->cursor.bo);

	ttm_bo_unreserve(bo);

	return 0;
}


/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.bo;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}

/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	if (!vmw_user_object_is_null(&vps->uo))
		vmw_user_object_unmap(&vps->uo);

	vmw_du_cursor_plane_unmap_cm(vps);
	vmw_du_put_cursor_mob(vcp, vps);

	vmw_du_plane_unpin_surf(vps);
	vmw_user_object_unref(&vps->uo);
}

/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_bo *bo = NULL;
	int ret = 0;

	if (!vmw_user_object_is_null(&vps->uo)) {
		vmw_user_object_unmap(&vps->uo);
		vmw_user_object_unref(&vps->uo);
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
			vps->uo.surface = NULL;
		} else {
			memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
		}
		vmw_user_object_ref(&vps->uo);
	}

	bo = vmw_user_object_buffer(&vps->uo);
	if (bo) {
		struct ttm_operation_ctx ctx = {false, false};

		ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
		if (ret != 0)
			return -ENOMEM;

		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret != 0) {
			/* Drop the reservation taken above before bailing out. */
			ttm_bo_unreserve(&bo->tbo);
			return -ENOMEM;
		}

		vmw_bo_pin_reserved(bo, true);
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

			(void)vmw_bo_map_and_cache_size(bo, size);
		} else {
			vmw_bo_map_and_cache(bo);
		}
		ttm_bo_unreserve(&bo->tbo);
	}

	if (!vmw_user_object_is_null(&vps->uo)) {
		vmw_du_get_cursor_mob(vcp, vps);
		vmw_du_cursor_plane_map_cm(vps);
	}

	return 0;
}

void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
	struct vmw_bo *old_bo = NULL;
	struct vmw_bo *new_bo = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	hotspot_x = du->hotspot_x + new_state->hotspot_x;
	hotspot_y = du->hotspot_y + new_state->hotspot_y;

	du->cursor_surface = vmw_user_object_surface(&vps->uo);

	if (vmw_user_object_is_null(&vps->uo)) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	vps->cursor.hotspot_x = hotspot_x;
	vps->cursor.hotspot_y = hotspot_y;

	if (du->cursor_surface)
		du->cursor_age = du->cursor_surface->snooper.age;

	if (!vmw_user_object_is_null(&old_vps->uo)) {
		old_bo = vmw_user_object_buffer(&old_vps->uo);
		ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
		if (ret != 0)
			return;
	}

	if (!vmw_user_object_is_null(&vps->uo)) {
		new_bo = vmw_user_object_buffer(&vps->uo);
		if (old_bo != new_bo) {
			ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
			if (ret != 0)
				return;
		} else {
			new_bo = NULL;
		}
	}
	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
		/*
		 * If it hasn't changed, avoid making the device do extra
		 * work by keeping the old cursor active.
		 */
		struct vmw_cursor_plane_state tmp = old_vps->cursor;
		old_vps->cursor = vps->cursor;
		vps->cursor = tmp;
	} else {
		void *image = vmw_du_cursor_plane_acquire_image(vps);
		if (image)
			vmw_cursor_update_image(dev_priv, vps, image,
						new_state->crtc_w,
						new_state->crtc_h,
						hotspot_x, hotspot_y);
	}

	if (old_bo)
		ttm_bo_unreserve(&old_bo->tbo);
	if (new_bo)
		ttm_bo_unreserve(&new_bo->tbo);

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}

/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_framebuffer *old_fb = old_state->fb;
	int ret;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_fb != new_fb)
		new_state->ignore_damage_clips = true;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);
	return ret;
}

/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo) {
		surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);

		WARN_ON(!surface);

		if (!surface ||
		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
			DRM_ERROR("surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}

int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/*
	 * This is fine in general, but broken userspace might expect
	 * some actual rendering so give a clue as to why it's blank.
	 */
	if (new_state->enable && !has_primary)
		drm_dbg_driver(&vmw->drm,
			       "CRTC without a primary plane will be blank.\n");

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
	vmw_vkms_crtc_atomic_begin(crtc, state);
}

/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	memset(&vps->cursor, 0, sizeof(vps->cursor));

	/* Each ref counted resource needs to be acquired again */
	vmw_user_object_ref(&vps->uo);
	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	vmw_user_object_unref(&vps->uo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */
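
/**
 * vmw_framebuffer_surface_destroy - destroy a surface-backed framebuffer
 * @framebuffer: framebuffer to destroy
 *
 * Drops the reference on the wrapped user object and frees the
 * framebuffer structure.
 */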
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_user_object_unref(&vfbs->uo);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}

static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);

	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.create_handle = vmw_framebuffer_surface_create_handle,
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
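
/**
 * vmw_kms_new_framebuffer_surface - create a framebuffer around a surface
 * @dev_priv: pointer to device private struct
 * @uo: user object containing the surface to wrap
 * @out: location in which to return the new framebuffer
 * @mode_cmd: framebuffer metadata requested by userspace
 *
 * Sanity checks the surface against the requested mode and pixel format
 * before initializing the framebuffer.
 *
 * Returns 0 on success, negative error code otherwise.
 */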
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_user_object *uo,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	struct vmw_surface *surface;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	surface = vmw_user_object_surface(uo);

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
	vmw_user_object_ref(&vfbs->uo);

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_user_object_unref(&vfbs->uo);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
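
/**
 * vmw_kms_new_framebuffer_bo - create a framebuffer around a buffer object
 * @dev_priv: pointer to device private struct
 * @bo: buffer object backing the framebuffer
 * @out: location in which to return the new framebuffer
 * @mode_cmd: framebuffer metadata requested by userspace
 *
 * Verifies that the buffer is large enough for the requested mode and that
 * the pixel format is supported before initializing the framebuffer.
 *
 * Returns 0 on success, negative error code otherwise.
 */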
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}

/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be less than the maximum texture size
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @uo: Pointer to user object to wrap the kms framebuffer around.
 * Either the buffer or surface inside the user object must be NULL.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_user_object *uo,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	int ret;

	/* Create the new framebuffer depending on what we have */
	if (vmw_user_object_surface(uo)) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
						      mode_cmd);
	} else if (uo->buffer) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */
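
/**
 * vmw_kms_fb_create - &drm_mode_config_funcs.fb_create callback
 * @dev: DRM device
 * @file_priv: the file identifying the caller
 * @mode_cmd: framebuffer metadata requested by userspace
 *
 * Looks up the user object (buffer object or surface) named by the first
 * framebuffer handle and wraps a new vmw framebuffer around it.
 */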
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_user_object uo = {0};
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
				     &uo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (vmw_user_object_surface(&uo) &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_object_lookup takes one ref; so does new_fb */
	vmw_user_object_unref(&uo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
	 * limit on primary bounding box
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}
0a80eb4c
DR
1716/**
1717 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1718 * @dev: DRM device
1719 * @state: the driver state object
1720 *
1721 * Returns:
1722 * 0 on success otherwise negative error code
1723 */
static int vmw_kms_check_topology(struct drm_device *dev,
                                  struct drm_atomic_state *state)
{
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_rect *rects;
        struct drm_crtc *crtc;
        uint32_t i;
        int ret = 0;

        rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
                        GFP_KERNEL);
        if (!rects)
                return -ENOMEM;

        drm_for_each_crtc(crtc, dev) {
                struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
                struct drm_crtc_state *crtc_state;

                i = drm_crtc_index(crtc);

                crtc_state = vmw_crtc_state_and_lock(state, crtc);
                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto clean;
                }

                if (!crtc_state)
                        continue;

                if (crtc_state->enable) {
                        rects[i].x1 = du->gui_x;
                        rects[i].y1 = du->gui_y;
                        rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
                        rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
                } else {
                        rects[i].x1 = 0;
                        rects[i].y1 = 0;
                        rects[i].x2 = 0;
                        rects[i].y2 = 0;
                }
        }

        /* Determine change to topology due to new atomic state */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
                struct drm_connector *connector;
                struct drm_connector_state *conn_state;
                struct vmw_connector_state *vmw_conn_state;

                if (!du->pref_active && new_crtc_state->enable) {
                        VMW_DEBUG_KMS("Enabling a disabled display unit\n");
                        ret = -EINVAL;
                        goto clean;
                }

                /*
                 * For vmwgfx each crtc has only one connector attached and
                 * it does not change, so there is no need to check
                 * crtc->connector_mask and iterate over it.
                 */
                connector = &du->connector;
                conn_state = drm_atomic_get_connector_state(state, connector);
                if (IS_ERR(conn_state)) {
                        ret = PTR_ERR(conn_state);
                        goto clean;
                }

                vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
                vmw_conn_state->gui_x = du->gui_x;
                vmw_conn_state->gui_y = du->gui_y;
        }

        ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
                                           rects);

clean:
        kfree(rects);
        return ret;
}
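
/*
 * Worked example (hypothetical numbers): a display unit at gui position
 * (1024, 0) with an enabled 800x600 mode contributes the rect
 * (1024, 0)-(1824, 600) above, while a disabled unit contributes the empty
 * rect (0, 0)-(0, 0). vmw_kms_check_display_memory() then validates that
 * the resulting topology fits within the device's memory limits.
 */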

/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check() that also lets
 * us assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message.
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
                             struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        bool need_modeset = false;
        int i, ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        ret = vmw_kms_check_implicit(dev, state);
        if (ret) {
                VMW_DEBUG_KMS("Invalid implicit state\n");
                return ret;
        }

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        need_modeset = true;
        }

        if (need_modeset)
                return vmw_kms_check_topology(dev, state);

        return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
        .fb_create = vmw_kms_fb_create,
        .atomic_check = vmw_kms_atomic_check_modeset,
        .atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
                                   struct drm_file *file_priv,
                                   struct vmw_framebuffer *vfb,
                                   struct vmw_surface *surface,
                                   uint32_t sid,
                                   int32_t destX, int32_t destY,
                                   struct drm_vmw_rect *clips,
                                   uint32_t num_clips)
{
        return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
                                            &surface->res, destX, destY,
                                            num_clips, 1, NULL, NULL);
}

int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
                    struct vmw_surface *surface,
                    uint32_t sid,
                    int32_t destX, int32_t destY,
                    struct drm_vmw_rect *clips,
                    uint32_t num_clips)
{
        int ret;

        switch (dev_priv->active_display_unit) {
        case vmw_du_screen_target:
                ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
                                                 &surface->res, destX, destY,
                                                 num_clips, 1, NULL, NULL);
                break;
        case vmw_du_screen_object:
                ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
                                              sid, destX, destY, clips,
                                              num_clips);
                break;
        default:
                WARN_ONCE(true,
                          "Present called with invalid display system.\n");
                ret = -ENOSYS;
                break;
        }
        if (ret)
                return ret;

        vmw_cmd_flush(dev_priv, false);

        return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
        if (dev_priv->hotplug_mode_update_property)
                return;

        dev_priv->hotplug_mode_update_property =
                drm_property_create_range(&dev_priv->drm,
                                          DRM_MODE_PROP_IMMUTABLE,
                                          "hotplug_mode_update", 0, 1);
}

static void
vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
{
        struct vmw_private *vmw = vmw_priv(old_state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        int i;

        drm_atomic_helper_commit_tail(old_state);

        if (vmw->vkms_enabled) {
                for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
                        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
                        (void)old_crtc_state;
                        flush_work(&du->vkms.crc_generator_work);
                }
        }
}

static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
        .atomic_commit_tail = vmw_atomic_commit_tail,
};

int vmw_kms_init(struct vmw_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        int ret;
        static const char *display_unit_names[] = {
                "Invalid",
                "Legacy",
                "Screen Object",
                "Screen Target",
                "Invalid (max)"
        };

        drm_mode_config_init(dev);
        dev->mode_config.funcs = &vmw_kms_funcs;
        dev->mode_config.min_width = 1;
        dev->mode_config.min_height = 1;
        dev->mode_config.max_width = dev_priv->texture_max_width;
        dev->mode_config.max_height = dev_priv->texture_max_height;
        dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
        dev->mode_config.helper_private = &vmw_mode_config_helpers;

        drm_mode_create_suggested_offset_properties(dev);
        vmw_kms_create_hotplug_mode_update_property(dev_priv);

        ret = vmw_kms_stdu_init_display(dev_priv);
        if (ret) {
                ret = vmw_kms_sou_init_display(dev_priv);
                if (ret) /* Fallback */
                        ret = vmw_kms_ldu_init_display(dev_priv);
        }
        BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
        drm_info(&dev_priv->drm, "%s display unit initialized\n",
                 display_unit_names[dev_priv->active_display_unit]);

        return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
        int ret = 0;

        /*
         * The docs say we should take the lock before calling this function,
         * but since it destroys encoders, and our destructor calls
         * drm_encoder_cleanup() which takes the modeset lock, we would
         * deadlock.
         */
        drm_mode_config_cleanup(&dev_priv->drm);
        if (dev_priv->active_display_unit == vmw_du_legacy)
                ret = vmw_kms_ldu_close_display(dev_priv);

        return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_vmw_cursor_bypass_arg *arg = data;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;
        int ret = 0;

        mutex_lock(&dev->mode_config.mutex);
        if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        du = vmw_crtc_to_du(crtc);
                        du->hotspot_x = arg->xhot;
                        du->hotspot_y = arg->yhot;
                }

                mutex_unlock(&dev->mode_config.mutex);
                return 0;
        }

        crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
        if (!crtc) {
                ret = -ENOENT;
                goto out;
        }

        du = vmw_crtc_to_du(crtc);

        du->hotspot_x = arg->xhot;
        du->hotspot_y = arg->yhot;

out:
        mutex_unlock(&dev->mode_config.mutex);

        return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth)
{
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
        vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
        vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
        if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
                vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

        if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
                DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
                          depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
                return -EINVAL;
        }

        return 0;
}
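
/*
 * Example call (hypothetical values, for illustration): programming a
 * 1024x768 mode at 32 bpp with a 4-byte-per-pixel pitch might look like
 *
 *	ret = vmw_kms_write_svga(dev_priv, 1024, 768, 1024 * 4, 32, 24);
 *
 * Note that the depth argument is only sanity-checked against what the
 * host reports in SVGA_REG_DEPTH; a mismatch returns -EINVAL.
 */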

static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                u64 pitch,
                                u64 height)
{
        return (pitch * height) < (u64)dev_priv->vram_size;
}
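
/*
 * Worked example: a 1920x1080 mode at 4 bytes per pixel gives a pitch of
 * 7680 bytes and needs 7680 * 1080 = 8294400 bytes (~7.9 MiB), so it is
 * only considered valid when that total is below the device's vram_size.
 */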

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 *
 * Returns: Zero on success.
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
                                unsigned int num_rects, struct drm_rect *rects)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_display_unit *du;
        struct drm_connector *con;
        struct drm_connector_list_iter conn_iter;
        struct drm_modeset_acquire_ctx ctx;
        struct drm_crtc *crtc;
        int ret;

        /* Currently gui_x/y is protected with the crtc mutex */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, 0);
retry:
        drm_for_each_crtc(crtc, dev) {
                ret = drm_modeset_lock(&crtc->mutex, &ctx);
                if (ret < 0) {
                        if (ret == -EDEADLK) {
                                drm_modeset_backoff(&ctx);
                                goto retry;
                        }
                        goto out_fini;
                }
        }

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(con, &conn_iter) {
                du = vmw_connector_to_du(con);
                if (num_rects > du->unit) {
                        du->pref_width = drm_rect_width(&rects[du->unit]);
                        du->pref_height = drm_rect_height(&rects[du->unit]);
                        du->pref_active = true;
                        du->gui_x = rects[du->unit].x1;
                        du->gui_y = rects[du->unit].y1;
                } else {
                        du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
                        du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
                        du->pref_active = false;
                        du->gui_x = 0;
                        du->gui_y = 0;
                }
        }
        drm_connector_list_iter_end(&conn_iter);

        list_for_each_entry(con, &dev->mode_config.connector_list, head) {
                du = vmw_connector_to_du(con);
                if (num_rects > du->unit) {
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_x_property,
                                 du->gui_x);
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_y_property,
                                 du->gui_y);
                } else {
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_x_property,
                                 0);
                        drm_object_property_set_value
                                (&con->base, dev->mode_config.suggested_y_property,
                                 0);
                }
                con->status = vmw_du_connector_detect(con, true);
        }
out_fini:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        mutex_unlock(&dev->mode_config.mutex);

        drm_sysfs_hotplug_event(dev);

        return 0;
}
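
/*
 * Worked example (hypothetical topology): with num_rects == 2 and rects
 * { (0,0)-(1024,768), (1024,0)-(2304,768) }, unit 0 gets a preferred mode
 * of 1024x768 at gui (0,0) and unit 1 gets 1280x768 at gui (1024,0); the
 * suggested_x property of unit 1's connector is set to 1024, hinting the
 * guest desktop to place the second output to the right of the first.
 */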

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                          u16 *r, u16 *g, u16 *b,
                          uint32_t size,
                          struct drm_modeset_acquire_ctx *ctx)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        int i;

        for (i = 0; i < size; i++) {
                DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
                          r[i], g[i], b[i]);
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
        }

        return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
        return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
        uint32_t num_displays;
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_display_unit *du = vmw_connector_to_du(connector);

        num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

        return ((du->unit < num_displays && du->pref_active) ?
                connector_status_connected : connector_status_disconnected);
}

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
        mode->hsync_start = mode->hdisplay + 50;
        mode->hsync_end = mode->hsync_start + 50;
        mode->htotal = mode->hsync_end + 50;

        mode->vsync_start = mode->vdisplay + 50;
        mode->vsync_end = mode->vsync_start + 50;
        mode->vtotal = mode->vsync_end + 50;

        mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
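
/*
 * Worked example: for a 640x480 mode the code above yields htotal = 790
 * and vtotal = 630, so clock = 790 * 630 / 100 * 6 = 29862 kHz, giving a
 * refresh rate of 29862000 / (790 * 630) ~= 60 Hz, as intended.
 */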

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right)
 * cannot be greater than INT_MAX. A topology beyond these limits will
 * return an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_vmw_update_layout_arg *arg =
                (struct drm_vmw_update_layout_arg *)data;
        void __user *user_rects;
        struct drm_vmw_rect *rects;
        struct drm_rect *drm_rects;
        unsigned rects_size;
        int ret, i;

        if (!arg->num_outputs) {
                struct drm_rect def_rect = {0, 0,
                                            VMWGFX_MIN_INITIAL_WIDTH,
                                            VMWGFX_MIN_INITIAL_HEIGHT};
                vmw_du_update_layout(dev_priv, 1, &def_rect);
                return 0;
        }

        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
        rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
                        GFP_KERNEL);
        if (unlikely(!rects))
                return -ENOMEM;

        user_rects = (void __user *)(unsigned long)arg->rects;
        ret = copy_from_user(rects, user_rects, rects_size);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to get rects.\n");
                ret = -EFAULT;
                goto out_free;
        }

        drm_rects = (struct drm_rect *)rects;

        VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
        for (i = 0; i < arg->num_outputs; i++) {
                struct drm_vmw_rect curr_rect;

                /* Verify user-space rects for overflow, as the kernel uses drm_rect */
                if ((rects[i].x + rects[i].w > INT_MAX) ||
                    (rects[i].y + rects[i].h > INT_MAX)) {
                        ret = -ERANGE;
                        goto out_free;
                }

                curr_rect = rects[i];
                drm_rects[i].x1 = curr_rect.x;
                drm_rects[i].y1 = curr_rect.y;
                drm_rects[i].x2 = curr_rect.x + curr_rect.w;
                drm_rects[i].y2 = curr_rect.y + curr_rect.h;

                VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
                              drm_rects[i].x1, drm_rects[i].y1,
                              drm_rects[i].x2, drm_rects[i].y2);

                /*
                 * Currently this check limits the topology to
                 * mode_config->max (which is actually the maximum texture
                 * size supported by the virtual device). The limit is here
                 * to accommodate window managers that create a single big
                 * framebuffer for the whole topology.
                 */
                if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
                    drm_rects[i].x2 > mode_config->max_width ||
                    drm_rects[i].y2 > mode_config->max_height) {
                        VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
                                      drm_rects[i].x1, drm_rects[i].y1,
                                      drm_rects[i].x2, drm_rects[i].y2);
                        ret = -EINVAL;
                        goto out_free;
                }
        }

        ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

        if (ret == 0)
                vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
        kfree(rects);
        return ret;
}
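
/*
 * Worked example of the rect conversion above: the user-space
 * drm_vmw_rect { .x = 640, .y = 0, .w = 800, .h = 600 } becomes the
 * kernel drm_rect (640, 0)-(1440, 600), which is then range- and
 * memory-checked before the layout is applied.
 */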

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                         struct vmw_framebuffer *framebuffer,
                         const struct drm_clip_rect *clips,
                         const struct drm_vmw_rect *vclips,
                         s32 dest_x, s32 dest_y,
                         int num_clips,
                         int increment,
                         struct vmw_kms_dirty *dirty)
{
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
        struct drm_crtc *crtc;
        u32 num_units = 0;
        u32 i, k;

        dirty->dev_priv = dev_priv;

        /* If crtc is passed, no need to iterate over other display units */
        if (dirty->crtc) {
                units[num_units++] = vmw_crtc_to_du(dirty->crtc);
        } else {
                list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
                                    head) {
                        struct drm_plane *plane = crtc->primary;

                        if (plane->state->fb == &framebuffer->base)
                                units[num_units++] = vmw_crtc_to_du(crtc);
                }
        }

        for (k = 0; k < num_units; k++) {
                struct vmw_display_unit *unit = units[k];
                s32 crtc_x = unit->crtc.x;
                s32 crtc_y = unit->crtc.y;
                s32 crtc_width = unit->crtc.mode.hdisplay;
                s32 crtc_height = unit->crtc.mode.vdisplay;
                const struct drm_clip_rect *clips_ptr = clips;
                const struct drm_vmw_rect *vclips_ptr = vclips;

                dirty->unit = unit;
                if (dirty->fifo_reserve_size > 0) {
                        dirty->cmd = VMW_CMD_RESERVE(dev_priv,
                                                     dirty->fifo_reserve_size);
                        if (!dirty->cmd)
                                return -ENOMEM;

                        memset(dirty->cmd, 0, dirty->fifo_reserve_size);
                }
                dirty->num_hits = 0;
                for (i = 0; i < num_clips; i++, clips_ptr += increment,
                     vclips_ptr += increment) {
                        s32 clip_left;
                        s32 clip_top;

                        /*
                         * Select clip array type. Note that integer type
                         * in @clips is unsigned short, whereas in @vclips
                         * it's 32-bit.
                         */
                        if (clips) {
                                dirty->fb_x = (s32) clips_ptr->x1;
                                dirty->fb_y = (s32) clips_ptr->y1;
                                dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
                                        crtc_x;
                                dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
                                        crtc_y;
                        } else {
                                dirty->fb_x = vclips_ptr->x;
                                dirty->fb_y = vclips_ptr->y;
                                dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
                                        dest_x - crtc_x;
                                dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
                                        dest_y - crtc_y;
                        }

                        dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
                        dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

                        /* Skip this clip if it's outside the crtc region */
                        if (dirty->unit_x1 >= crtc_width ||
                            dirty->unit_y1 >= crtc_height ||
                            dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
                                continue;

                        /* Clip right and bottom to crtc limits */
                        dirty->unit_x2 = min_t(s32, dirty->unit_x2,
                                               crtc_width);
                        dirty->unit_y2 = min_t(s32, dirty->unit_y2,
                                               crtc_height);

                        /* Clip left and top to crtc limits */
                        clip_left = min_t(s32, dirty->unit_x1, 0);
                        clip_top = min_t(s32, dirty->unit_y1, 0);
                        dirty->unit_x1 -= clip_left;
                        dirty->unit_y1 -= clip_top;
                        dirty->fb_x -= clip_left;
                        dirty->fb_y -= clip_top;

                        dirty->clip(dirty);
                }

                dirty->fifo_commit(dirty);
        }

        return 0;
}
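
/*
 * Worked example of the coordinate translation above (hypothetical
 * numbers): with a crtc at (1024, 0) and dest_x/dest_y == 0, a
 * framebuffer clip (1100, 50)-(1200, 150) maps to unit coordinates
 * (76, 50)-(176, 150); clips that fall entirely outside the crtc are
 * skipped, and partially visible ones are clamped to the crtc bounds.
 */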

/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
                                      struct drm_file *file_priv,
                                      struct vmw_validation_context *ctx,
                                      struct vmw_fence_obj **out_fence,
                                      struct drm_vmw_fence_rep __user *
                                      user_fence_rep)
{
        struct vmw_fence_obj *fence = NULL;
        uint32_t handle = 0;
        int ret = 0;

        if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
            out_fence)
                ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
                                                 file_priv ? &handle : NULL);
        vmw_validation_done(ctx, fence);
        if (file_priv)
                vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                            ret, user_fence_rep, fence,
                                            handle, -1);
        if (out_fence)
                *out_fence = fence;
        else
                vmw_fence_obj_unreference(&fence);
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
        if (dev_priv->implicit_placement_property)
                return;

        dev_priv->implicit_placement_property =
                drm_property_create_range(&dev_priv->drm,
                                          DRM_MODE_PROP_IMMUTABLE,
                                          "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
        if (IS_ERR(dev_priv->suspend_state)) {
                int ret = PTR_ERR(dev_priv->suspend_state);

                DRM_ERROR("Failed kms suspend: %d\n", ret);
                dev_priv->suspend_state = NULL;

                return ret;
        }

        return 0;
}

/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        if (WARN_ON(!dev_priv->suspend_state))
                return 0;

        ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
        dev_priv->suspend_state = NULL;

        return ret;
}

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
        drm_atomic_helper_shutdown(dev);
}

/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting the callbacks in &vmw_du_update_plane to
 * perform a plane update on a display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
        struct drm_plane_state *state = update->plane->state;
        struct drm_plane_state *old_state = update->old_state;
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect clip;
        struct drm_rect bb;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        uint32_t reserved_size = 0;
        uint32_t submit_size = 0;
        uint32_t curr_size = 0;
        uint32_t num_hits = 0;
        void *cmd_start;
        char *cmd_next;
        int ret;

        /*
         * Iterate in advance to check whether a plane update is really
         * needed, and to find the number of clips that actually fall
         * within the plane src, for fifo allocation.
         */
        drm_atomic_helper_damage_iter_init(&iter, old_state, state);
        drm_atomic_for_each_plane_damage(&iter, &clip)
                num_hits++;

        if (num_hits == 0)
                return 0;

        if (update->vfb->bo) {
                struct vmw_framebuffer_bo *vfbbo =
                        container_of(update->vfb, typeof(*vfbbo), base);

                /*
                 * For screen targets we want a mappable bo; for everything
                 * else we want an accelerated, i.e. host-backed (vram or
                 * gmr), bo. If the display unit is not a screen target,
                 * then mobs shouldn't be available.
                 */
                if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
                        vmw_bo_placement_set(vfbbo->buffer,
                                             VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
                                             VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
                } else {
                        WARN_ON(update->dev_priv->has_mob);
                        vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
                }
                ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
        } else {
                struct vmw_framebuffer_surface *vfbs =
                        container_of(update->vfb, typeof(*vfbs), base);
                struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);

                ret = vmw_validation_add_resource(&val_ctx, &surf->res,
                                                  0, VMW_RES_DIRTY_NONE, NULL,
                                                  NULL);
        }

        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
        if (ret)
                goto out_unref;

        reserved_size = update->calc_fifo_size(update, num_hits);
        cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
        if (!cmd_start) {
                ret = -ENOMEM;
                goto out_revert;
        }

        cmd_next = cmd_start;

        if (update->post_prepare) {
                curr_size = update->post_prepare(update, cmd_next);
                cmd_next += curr_size;
                submit_size += curr_size;
        }

        if (update->pre_clip) {
                curr_size = update->pre_clip(update, cmd_next, num_hits);
                cmd_next += curr_size;
                submit_size += curr_size;
        }

        bb.x1 = INT_MAX;
        bb.y1 = INT_MAX;
        bb.x2 = INT_MIN;
        bb.y2 = INT_MIN;

        drm_atomic_helper_damage_iter_init(&iter, old_state, state);
        drm_atomic_for_each_plane_damage(&iter, &clip) {
                uint32_t fb_x = clip.x1;
                uint32_t fb_y = clip.y1;

                vmw_du_translate_to_crtc(state, &clip);
                if (update->clip) {
                        curr_size = update->clip(update, cmd_next, &clip, fb_x,
                                                 fb_y);
                        cmd_next += curr_size;
                        submit_size += curr_size;
                }
                bb.x1 = min_t(int, bb.x1, clip.x1);
                bb.y1 = min_t(int, bb.y1, clip.y1);
                bb.x2 = max_t(int, bb.x2, clip.x2);
                bb.y2 = max_t(int, bb.y2, clip.y2);
        }

        curr_size = update->post_clip(update, cmd_next, &bb);
        submit_size += curr_size;

        if (reserved_size < submit_size)
                submit_size = 0;

        vmw_cmd_commit(update->dev_priv, submit_size);

        vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
                                         update->out_fence, NULL);
        return ret;

out_revert:
        vmw_validation_revert(&val_ctx);

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}
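
/*
 * A sketch of the callback flow above, for a hypothetical display-unit
 * implementation: after the caller fills in a struct vmw_du_update_plane,
 * the helper reserves calc_fifo_size(update, num_hits) bytes of fifo
 * space, then emits commands in this order:
 *
 *	post_prepare(update, cmd)               (optional)
 *	pre_clip(update, cmd, num_hits)         (optional)
 *	clip(update, cmd, &clip, fb_x, fb_y)    (once per damage clip)
 *	post_clip(update, cmd, &bb)             (bounding box of all clips)
 *
 * and finally commits the accumulated submit_size and fences the
 * validation context.
 */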

/**
 * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
 *
 * @connector: the drm connector, part of a DU container
 * @mode: drm mode to check
 *
 * Returns MODE_OK on success, or a drm_mode_status error code.
 */
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
                                              struct drm_display_mode *mode)
{
        enum drm_mode_status ret;
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
        u32 assumed_cpp = 4;

        if (dev_priv->assume_16bpp)
                assumed_cpp = 2;

        ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
                                     dev_priv->texture_max_height);
        if (ret != MODE_OK)
                return ret;

        if (!vmw_kms_validate_mode_vram(dev_priv,
                                        mode->hdisplay * assumed_cpp,
                                        mode->vdisplay))
                return MODE_MEM;

        return MODE_OK;
}

/**
 * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
 *
 * @connector: the drm connector, part of a DU container
 *
 * Returns the number of added modes.
 */
int vmw_connector_get_modes(struct drm_connector *connector)
{
        struct vmw_display_unit *du = vmw_connector_to_du(connector);
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_display_mode *mode = NULL;
        struct drm_display_mode prefmode = { DRM_MODE("preferred",
                DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        u32 max_width;
        u32 max_height;
        u32 num_modes;

        /* Add preferred mode */
        mode = drm_mode_duplicate(dev, &prefmode);
        if (!mode)
                return 0;

        mode->hdisplay = du->pref_width;
        mode->vdisplay = du->pref_height;
        vmw_guess_mode_timing(mode);
        drm_mode_set_name(mode);

        drm_mode_probed_add(connector, mode);
        drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));

        /* Probe connector for all modes not exceeding our geom limits */
        max_width = dev_priv->texture_max_width;
        max_height = dev_priv->texture_max_height;

        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width = min(dev_priv->stdu_max_width, max_width);
                max_height = min(dev_priv->stdu_max_height, max_height);
        }

        num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);

        return num_modes;
}

struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
{
        if (uo->buffer)
                vmw_user_bo_ref(uo->buffer);
        else if (uo->surface)
                vmw_surface_reference(uo->surface);
        return uo;
}

void vmw_user_object_unref(struct vmw_user_object *uo)
{
        if (uo->buffer)
                vmw_user_bo_unref(&uo->buffer);
        else if (uo->surface)
                vmw_surface_unreference(&uo->surface);
}

struct vmw_bo *
vmw_user_object_buffer(struct vmw_user_object *uo)
{
        if (uo->buffer)
                return uo->buffer;
        else if (uo->surface)
                return uo->surface->res.guest_memory_bo;
        return NULL;
}

struct vmw_surface *
vmw_user_object_surface(struct vmw_user_object *uo)
{
        if (uo->buffer)
                return uo->buffer->dumb_surface;
        return uo->surface;
}

void *vmw_user_object_map(struct vmw_user_object *uo)
{
        struct vmw_bo *bo = vmw_user_object_buffer(uo);

        WARN_ON(!bo);
        return vmw_bo_map_and_cache(bo);
}

void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
{
        struct vmw_bo *bo = vmw_user_object_buffer(uo);

        WARN_ON(!bo);
        return vmw_bo_map_and_cache_size(bo, size);
}

void vmw_user_object_unmap(struct vmw_user_object *uo)
{
        struct vmw_bo *bo = vmw_user_object_buffer(uo);
        int ret;

        WARN_ON(!bo);

        /* Fence the mob creation so we are guaranteed to have the mob */
        ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
        if (ret != 0)
                return;

        vmw_bo_unmap(bo);
        vmw_bo_pin_reserved(bo, false);

        ttm_bo_unreserve(&bo->tbo);
}

bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
{
        struct vmw_bo *bo;

        if (!uo || vmw_user_object_is_null(uo))
                return false;

        bo = vmw_user_object_buffer(uo);

        if (WARN_ON(!bo))
                return false;

        WARN_ON(bo->map.bo && !bo->map.virtual);
        return bo->map.virtual;
}

bool vmw_user_object_is_null(struct vmw_user_object *uo)
{
        return !uo->buffer && !uo->surface;
}
2847}