drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>

void vmw_du_cleanup(struct vmw_display_unit *du)
{
        struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

        drm_plane_cleanup(&du->primary);
        if (vmw_cmd_supported(dev_priv))
                drm_plane_cleanup(&du->cursor.base);

        drm_connector_unregister(&du->connector);
        drm_crtc_cleanup(&du->crtc);
        drm_encoder_cleanup(&du->encoder);
        drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
                                  struct vmw_plane_state *vps,
                                  u32 *image, u32 width, u32 height,
                                  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
        u32 cmd;
        SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
                                       u32 *image, u32 width, u32 height,
                                       u32 hotspotX, u32 hotspotY)
{
        struct vmw_svga_fifo_cmd_define_cursor *cmd;
        const u32 image_size = width * height * sizeof(*image);
        const u32 cmd_size = sizeof(*cmd) + image_size;

        /*
         * Try to reserve fifocmd space and swallow any failures;
         * such reservations cannot be left unconsumed for long
         * under the risk of clogging other fifocmd users, so
         * we treat reservations separately from the way we treat
         * other fallible KMS-atomic resources at prepare_fb.
         */
        cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

        if (unlikely(!cmd))
                return;

        memset(cmd, 0, sizeof(*cmd));

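        /* The cursor image payload follows the fixed-size command header. */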
        memcpy(&cmd[1], image, image_size);

        cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
        cmd->cursor.id = 0;
        cmd->cursor.width = width;
        cmd->cursor.height = height;
        cmd->cursor.hotspotX = hotspotX;
        cmd->cursor.hotspotY = hotspotY;

        vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
                                    struct vmw_plane_state *vps,
                                    u32 *image, u32 width, u32 height,
                                    u32 hotspotX, u32 hotspotY)
{
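        /*
         * With a cursor MOB the image dimensions are taken from the plane
         * state rather than from the caller-supplied width and height.
         */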
        if (vps->cursor.bo)
                vmw_cursor_update_mob(dev_priv, vps, image,
                                      vps->base.crtc_w, vps->base.crtc_h,
                                      hotspotX, hotspotY);
        else
                vmw_send_define_cursor_cmd(dev_priv, image, width, height,
                                           hotspotX, hotspotY);
}

/**
 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
                                  struct vmw_plane_state *vps,
                                  u32 *image, u32 width, u32 height,
                                  u32 hotspotX, u32 hotspotY)
{
        SVGAGBCursorHeader *header;
        SVGAGBAlphaCursorHeader *alpha_header;
        const u32 image_size = width * height * sizeof(*image);

        header = vmw_bo_map_and_cache(vps->cursor.bo);
        alpha_header = &header->header.alphaHeader;

        memset(header, 0, sizeof(*header));

        header->type = SVGA_ALPHA_CURSOR;
        header->sizeInBytes = image_size;

        alpha_header->hotspotX = hotspotX;
        alpha_header->hotspotY = hotspotY;
        alpha_header->width = width;
        alpha_header->height = height;

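        /* The cursor pixels are laid out immediately after the header in the MOB. */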
        memcpy(header + 1, image, image_size);
        vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
                  vps->cursor.bo->tbo.resource->start);
}

static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
        return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}

/**
 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
        bool is_iomem;

        if (vps->surf) {
                if (vps->surf_mapped)
                        return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
                return vps->surf->snooper.image;
        } else if (vps->bo) {
                return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
        }

        return NULL;
}

static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
                                            struct vmw_plane_state *new_vps)
{
        void *old_image;
        void *new_image;
        u32 size;
        bool changed;

        if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
            old_vps->base.crtc_h != new_vps->base.crtc_h)
                return true;

        if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
            old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
                return true;

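        /* Cursor images are 32 bits per pixel, hence sizeof(u32) below. */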
        size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

        old_image = vmw_du_cursor_plane_acquire_image(old_vps);
        new_image = vmw_du_cursor_plane_acquire_image(new_vps);

        changed = false;
        if (old_image && new_image)
                changed = memcmp(old_image, new_image, size) != 0;

        return changed;
}

static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
        if (!(*vbo))
                return;

        ttm_bo_unpin(&(*vbo)->tbo);
        vmw_bo_unreference(vbo);
}
233
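/**
 * vmw_du_put_cursor_mob - Return a cursor mob to the per-plane cache
 * @vcp: the cursor plane owning the mob cache
 * @vps: the plane state whose mob is being returned
 *
 * Unmaps the mob and stashes it in a free cache slot, evicting a smaller
 * cached mob if the cache is full; destroys it when caching isn't worthwhile.
 */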
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
                                  struct vmw_plane_state *vps)
{
        u32 i;

        if (!vps->cursor.bo)
                return;

        vmw_du_cursor_plane_unmap_cm(vps);

        /* Look for a free slot to return this mob to the cache. */
        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
                if (!vcp->cursor_mobs[i]) {
                        vcp->cursor_mobs[i] = vps->cursor.bo;
                        vps->cursor.bo = NULL;
                        return;
                }
        }

        /* Cache is full: See if this mob is bigger than an existing mob. */
        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
                if (vcp->cursor_mobs[i]->tbo.base.size <
                    vps->cursor.bo->tbo.base.size) {
                        vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
                        vcp->cursor_mobs[i] = vps->cursor.bo;
                        vps->cursor.bo = NULL;
                        return;
                }
        }

        /* Destroy it if it's not worth caching. */
        vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}
267
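/**
 * vmw_du_get_cursor_mob - Acquire a cursor mob big enough for the plane state
 * @vcp: the cursor plane owning the mob cache
 * @vps: the plane state that needs a mob
 *
 * Reuses the currently held or a cached mob when one is large enough,
 * otherwise creates and fences a new one.
 *
 * Returns 0 on success, negative error code otherwise.
 */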
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
                                 struct vmw_plane_state *vps)
{
        struct vmw_private *dev_priv = vcp->base.dev->dev_private;
        u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
        u32 i;
        u32 cursor_max_dim, mob_max_size;
        int ret;

        if (!dev_priv->has_mob ||
            (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
                return -EINVAL;

        mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
        cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

        if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
            vps->base.crtc_h > cursor_max_dim)
                return -EINVAL;

        if (vps->cursor.bo) {
                if (vps->cursor.bo->tbo.base.size >= size)
                        return 0;
                vmw_du_put_cursor_mob(vcp, vps);
        }

        /* Look for an unused mob in the cache. */
        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
                if (vcp->cursor_mobs[i] &&
                    vcp->cursor_mobs[i]->tbo.base.size >= size) {
                        vps->cursor.bo = vcp->cursor_mobs[i];
                        vcp->cursor_mobs[i] = NULL;
                        return 0;
                }
        }

        /* Create a new mob if we can't find an existing one. */
        ret = vmw_bo_create_and_populate(dev_priv, size,
                                         VMW_BO_DOMAIN_MOB,
                                         &vps->cursor.bo);
        if (ret != 0)
                return ret;

        /* Fence the mob creation so we are guaranteed to have the mob. */
        ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
        if (ret != 0)
                goto teardown;

        vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
        ttm_bo_unreserve(&vps->cursor.bo->tbo);
        return 0;

teardown:
        vmw_du_destroy_cursor_mob(&vps->cursor.bo);
        return ret;
}

static void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                       bool show, int x, int y)
{
        const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
                                             : SVGA_CURSOR_ON_HIDE;
        uint32_t count;

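        /*
         * Prefer the CURSOR4 register set when the device exposes it, then
         * cursor bypass 3 via FIFO registers, and finally the legacy cursor
         * registers.
         */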
        spin_lock(&dev_priv->cursor_lock);
        if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
                vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
        } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
                count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
        } else {
                vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
                vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
                vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
        }
        spin_unlock(&dev_priv->cursor_lock);
}
353
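/**
 * vmw_kms_cursor_snoop - Snoop a cursor image out of a surface DMA command
 * @srf: the surface the DMA command targets
 * @tfile: the ttm object file of the caller
 * @bo: the buffer object the DMA command reads from
 * @header: header of the surface DMA command
 *
 * Copies the cursor image from the guest-supplied DMA buffer into the
 * surface's snooper image so the driver can redefine the cursor later.
 */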
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_num;
        SVGA3dCopyBox *box;
        unsigned int box_count;
        void *virtual;
        bool is_iomem;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int i, ret;
        const struct SVGA3dSurfaceDesc *desc =
                vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
        const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

        cmd = container_of(header, struct vmw_dma_cmd, header);

        /* No snooper installed, nothing to copy */
        if (!srf->snooper.image)
                return;

        if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
                DRM_ERROR("face and mipmap for cursors should never != 0\n");
                return;
        }

        if (cmd->header.size < 64) {
                DRM_ERROR("at least one full copy box must be given\n");
                return;
        }

        box = (SVGA3dCopyBox *)&cmd[1];
        box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
                        sizeof(SVGA3dCopyBox);

        if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
            box->x != 0    || box->y != 0    || box->z != 0    ||
            box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
            box->d != 1    || box_count != 1 ||
            box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
                /* TODO handle non-page-aligned offsets */
                /* TODO handle more dst & src != 0 */
                /* TODO handle more than one copy */
                DRM_ERROR("Can't snoop dma request for cursor!\n");
                DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
                          box->srcx, box->srcy, box->srcz,
                          box->x, box->y, box->z,
                          box->w, box->h, box->d, box_count,
                          cmd->dma.guest.ptr.offset);
                return;
        }

        kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
        kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("reserve failed\n");
                return;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0))
                goto err_unreserve;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

        if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
                memcpy(srf->snooper.image, virtual,
                       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
        } else {
                /* Copy the image line by line since the pitches differ. */
                for (i = 0; i < box->h; i++)
                        memcpy(srf->snooper.image + i * image_pitch,
                               virtual + i * cmd->dma.guest.pitch,
                               box->w * desc->pitchBytesPerBlock);
        }

        srf->snooper.age++;

        ttm_bo_kunmap(&map);
err_unreserve:
        ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;

        drm_modeset_lock_all(dev);
        drm_for_each_crtc(crtc, dev) {
                du = vmw_crtc_to_du(crtc);

                du->hotspot_x = 0;
                du->hotspot_y = 0;
        }
        drm_modeset_unlock_all(dev);
}
467
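/**
 * vmw_kms_cursor_post_execbuf - Re-emit snooped cursor images
 * @dev_priv: Pointer to the device private struct.
 *
 * Called after command submission; redefines the cursor for every display
 * unit whose snooped cursor image has aged.
 */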
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;

        mutex_lock(&dev->mode_config.mutex);

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                du = vmw_crtc_to_du(crtc);
                if (!du->cursor_surface ||
                    du->cursor_age == du->cursor_surface->snooper.age ||
                    !du->cursor_surface->snooper.image)
                        continue;

                du->cursor_age = du->cursor_surface->snooper.age;
                vmw_send_define_cursor_cmd(dev_priv,
                                           du->cursor_surface->snooper.image,
                                           VMW_CURSOR_SNOOP_WIDTH,
                                           VMW_CURSOR_SNOOP_HEIGHT,
                                           du->hotspot_x + du->core_hotspot_x,
                                           du->hotspot_y + du->core_hotspot_y);
        }

        mutex_unlock(&dev->mode_config.mutex);
}

void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
        struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
        u32 i;

        vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
                vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

        drm_plane_cleanup(plane);
}

void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
        drm_plane_cleanup(plane);

        /* Planes are static in our case so we don't free them. */
}

/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
                             bool unreference)
{
        if (vps->surf) {
                if (vps->pinned) {
                        vmw_resource_unpin(&vps->surf->res);
                        vps->pinned--;
                }

                if (unreference) {
                        if (vps->pinned)
                                DRM_ERROR("Surface still pinned\n");
                        vmw_surface_unreference(&vps->surf);
                }
        }
}

/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
                        struct drm_plane_state *old_state)
{
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

        vmw_du_plane_unpin_surf(vps, false);
}

/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mob.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
        int ret;
        u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
        struct ttm_buffer_object *bo;
        void *virtual;

        if (!vps->cursor.bo)
                return -EINVAL;

        bo = &vps->cursor.bo->tbo;

        if (bo->base.size < size)
                return -EINVAL;

        if (vps->cursor.bo->map.virtual)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (unlikely(ret != 0))
                return -ENOMEM;

        /* vmw_bo_map_and_cache() returns NULL on failure. */
        virtual = vmw_bo_map_and_cache(vps->cursor.bo);

        ttm_bo_unreserve(bo);

        if (unlikely(!virtual))
                return -ENOMEM;

        return 0;
}

/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mob.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
        int ret = 0;
        struct vmw_bo *vbo = vps->cursor.bo;

        if (!vbo || !vbo->map.virtual)
                return 0;

        ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
        if (likely(ret == 0)) {
                vmw_bo_unmap(vbo);
                ttm_bo_unreserve(&vbo->tbo);
        }

        return ret;
}

/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
                               struct drm_plane_state *old_state)
{
        struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
        bool is_iomem;

        if (vps->surf_mapped) {
                vmw_bo_unmap(vps->surf->res.guest_memory_bo);
                vps->surf_mapped = false;
        }

        if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
                const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);

                if (likely(ret == 0)) {
                        ttm_bo_kunmap(&vps->bo->map);
                        ttm_bo_unreserve(&vps->bo->tbo);
                }
        }

        vmw_du_cursor_plane_unmap_cm(vps);
        vmw_du_put_cursor_mob(vcp, vps);

        vmw_du_plane_unpin_surf(vps, false);

        if (vps->surf) {
                vmw_surface_unreference(&vps->surf);
                vps->surf = NULL;
        }

        if (vps->bo) {
                vmw_bo_unreference(&vps->bo);
                vps->bo = NULL;
        }
}

/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
                               struct drm_plane_state *new_state)
{
        struct drm_framebuffer *fb = new_state->fb;
        struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
        int ret = 0;

        if (vps->surf) {
                vmw_surface_unreference(&vps->surf);
                vps->surf = NULL;
        }

        if (vps->bo) {
                vmw_bo_unreference(&vps->bo);
                vps->bo = NULL;
        }

        if (fb) {
                if (vmw_framebuffer_to_vfb(fb)->bo) {
                        vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
                        vmw_bo_reference(vps->bo);
                } else {
                        vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
                        vmw_surface_reference(vps->surf);
                }
        }

        if (!vps->surf && vps->bo) {
                const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

                /*
                 * Not using vmw_bo_map_and_cache() helper here as we need to
                 * reserve the ttm_buffer_object first which
                 * vmw_bo_map_and_cache() omits.
                 */
                ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
                if (unlikely(ret != 0))
                        return -ENOMEM;

                ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

                ttm_bo_unreserve(&vps->bo->tbo);

                if (unlikely(ret != 0))
                        return -ENOMEM;
        } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
                WARN_ON(vps->surf->snooper.image);
                ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
                                     NULL);
                if (unlikely(ret != 0))
                        return -ENOMEM;
                vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
                ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
                vps->surf_mapped = true;
        }

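        /*
         * Failure to acquire or map a cursor mob is not fatal here:
         * vmw_cursor_update_image() falls back to the FIFO define-cursor
         * path when vps->cursor.bo is NULL.
         */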
        if (vps->surf || vps->bo) {
                vmw_du_get_cursor_mob(vcp, vps);
                vmw_du_cursor_plane_map_cm(vps);
        }

        return 0;
}

void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                  struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
        struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
        s32 hotspot_x, hotspot_y;

        hotspot_x = du->hotspot_x;
        hotspot_y = du->hotspot_y;

        if (new_state->fb) {
                hotspot_x += new_state->fb->hot_x;
                hotspot_y += new_state->fb->hot_y;
        }

        du->cursor_surface = vps->surf;
        du->cursor_bo = vps->bo;

        if (!vps->surf && !vps->bo) {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return;
        }

        vps->cursor.hotspot_x = hotspot_x;
        vps->cursor.hotspot_y = hotspot_y;

        if (vps->surf)
                du->cursor_age = du->cursor_surface->snooper.age;

        if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
                /*
                 * If it hasn't changed, avoid making the device do extra
                 * work by keeping the old cursor active.
                 */
                struct vmw_cursor_plane_state tmp = old_vps->cursor;

                old_vps->cursor = vps->cursor;
                vps->cursor = tmp;
        } else {
                void *image = vmw_du_cursor_plane_acquire_image(vps);

                if (image)
                        vmw_cursor_update_image(dev_priv, vps, image,
                                                new_state->crtc_w,
                                                new_state->crtc_h,
                                                hotspot_x, hotspot_y);
        }

        du->cursor_x = new_state->crtc_x + du->set_gui_x;
        du->cursor_y = new_state->crtc_y + du->set_gui_y;

        vmw_cursor_update_position(dev_priv, true,
                                   du->cursor_x + hotspot_x,
                                   du->cursor_y + hotspot_y);

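        /*
         * Remember the framebuffer-supplied part of the hotspot separately,
         * so that it can later be recombined with the legacy ioctl-supplied
         * hotspot stored in du->hotspot_x/y.
         */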
        du->core_hotspot_x = hotspot_x - du->hotspot_x;
        du->core_hotspot_y = hotspot_y - du->hotspot_y;
}

/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
                                      struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct drm_crtc_state *crtc_state = NULL;
        struct drm_framebuffer *new_fb = new_state->fb;
        int ret;

        if (new_state->crtc)
                crtc_state = drm_atomic_get_new_crtc_state(state,
                                                           new_state->crtc);

        ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                  DRM_PLANE_NO_SCALING,
                                                  DRM_PLANE_NO_SCALING,
                                                  false, true);

        if (!ret && new_fb) {
                struct drm_crtc *crtc = new_state->crtc;
                struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

                vmw_connector_state_to_vcs(du->connector.state);
        }

        return ret;
}

/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        int ret = 0;
        struct drm_crtc_state *crtc_state = NULL;
        struct vmw_surface *surface = NULL;
        struct drm_framebuffer *fb = new_state->fb;

        if (new_state->crtc)
                crtc_state = drm_atomic_get_new_crtc_state(state,
                                                           new_state->crtc);

        ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                  DRM_PLANE_NO_SCALING,
                                                  DRM_PLANE_NO_SCALING,
                                                  true, true);
        if (ret)
                return ret;

        /* Turning off */
        if (!fb)
                return 0;

        /* A lot of the code assumes this */
        if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
                DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
                          new_state->crtc_w, new_state->crtc_h);
                return -EINVAL;
        }

        if (!vmw_framebuffer_to_vfb(fb)->bo) {
                surface = vmw_framebuffer_to_vfbs(fb)->surface;

                WARN_ON(!surface);

                if (!surface ||
                    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
                        DRM_ERROR("surface not suitable for cursor\n");
                        return -EINVAL;
                }
        }

        return 0;
}

int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
                             struct drm_atomic_state *state)
{
        struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
                                                                         crtc);
        struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
        int connector_mask = drm_connector_mask(&du->connector);
        bool has_primary = new_state->plane_mask &
                           drm_plane_mask(crtc->primary);

        /* We always want to have an active plane with an active CRTC */
        if (has_primary != new_state->enable)
                return -EINVAL;

        if (new_state->connector_mask != connector_mask &&
            new_state->connector_mask != 0) {
                DRM_ERROR("Invalid connectors configuration\n");
                return -EINVAL;
        }

        /*
         * Our virtual device does not have a dot clock, so use the logical
         * clock value as the dot clock.
         */
        if (new_state->mode.crtc_clock == 0)
                new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

        return 0;
}

void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
                              struct drm_atomic_state *state)
{
}

void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
                              struct drm_atomic_state *state)
{
}

/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct drm_crtc_state *state;
        struct vmw_crtc_state *vcs;

        if (WARN_ON(!crtc->state))
                return NULL;

        vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
        if (!vcs)
                return NULL;

        state = &vcs->base;

        __drm_atomic_helper_crtc_duplicate_state(crtc, state);

        return state;
}

/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
        struct vmw_crtc_state *vcs;

        if (crtc->state) {
                __drm_atomic_helper_crtc_destroy_state(crtc->state);
                kfree(vmw_crtc_state_to_vcs(crtc->state));
        }

        vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
        if (!vcs) {
                DRM_ERROR("Cannot allocate vmw_crtc_state\n");
                return;
        }

        __drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}

/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
                          struct drm_crtc_state *state)
{
        drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
        struct drm_plane_state *state;
        struct vmw_plane_state *vps;

        vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
        if (!vps)
                return NULL;

        vps->pinned = 0;
        vps->cpp = 0;

        memset(&vps->cursor, 0, sizeof(vps->cursor));

        /* Each ref counted resource needs to be acquired again */
        if (vps->surf)
                (void) vmw_surface_reference(vps->surf);

        if (vps->bo)
                (void) vmw_bo_reference(vps->bo);

        state = &vps->base;

        __drm_atomic_helper_plane_duplicate_state(plane, state);

        return state;
}

/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
        struct vmw_plane_state *vps;

        if (plane->state)
                vmw_du_plane_destroy_state(plane, plane->state);

        vps = kzalloc(sizeof(*vps), GFP_KERNEL);
        if (!vps) {
                DRM_ERROR("Cannot allocate vmw_plane_state\n");
                return;
        }

        __drm_atomic_helper_plane_reset(plane, &vps->base);
}

/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
                           struct drm_plane_state *state)
{
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

        /* Should have been freed by cleanup_fb */
        if (vps->surf)
                vmw_surface_unreference(&vps->surf);

        if (vps->bo)
                vmw_bo_unreference(&vps->bo);

        drm_atomic_helper_plane_destroy_state(plane, state);
}

/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
        struct drm_connector_state *state;
        struct vmw_connector_state *vcs;

        if (WARN_ON(!connector->state))
                return NULL;

        vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
        if (!vcs)
                return NULL;

        state = &vcs->base;

        __drm_atomic_helper_connector_duplicate_state(connector, state);

        return state;
}

/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
        struct vmw_connector_state *vcs;

        if (connector->state) {
                __drm_atomic_helper_connector_destroy_state(connector->state);
                kfree(vmw_connector_state_to_vcs(connector->state));
        }

        vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
        if (!vcs) {
                DRM_ERROR("Cannot allocate vmw_connector_state\n");
                return;
        }

        __drm_atomic_helper_connector_reset(connector, &vcs->base);
}

/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
                               struct drm_connector_state *state)
{
        drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);

        drm_framebuffer_cleanup(framebuffer);
        vmw_surface_unreference(&vfbs->surface);

        kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
                     struct drm_file *file_priv,
                     struct vmw_framebuffer *vfb,
                     struct drm_vmw_fence_rep __user *user_fence_rep,
                     struct drm_vmw_rect *vclips,
                     uint32_t num_clips)
{
        switch (dev_priv->active_display_unit) {
        case vmw_du_screen_object:
                return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
                                            user_fence_rep, vclips, num_clips,
                                            NULL);
        case vmw_du_screen_target:
                return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
                                             user_fence_rep, NULL, vclips, num_clips,
                                             1, NULL);
        default:
                WARN_ONCE(true,
                          "Readback called with invalid display system.\n");
        }

        return -ENOSYS;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
        .destroy = vmw_framebuffer_surface_destroy,
        .dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
                                           struct vmw_surface *surface,
                                           struct vmw_framebuffer **out,
                                           const struct drm_mode_fb_cmd2
                                           *mode_cmd,
                                           bool is_bo_proxy)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_framebuffer_surface *vfbs;
        enum SVGA3dSurfaceFormat format;
        int ret;

        /* 3D is only supported on HWv8 and newer hosts */
        if (dev_priv->active_display_unit == vmw_du_legacy)
                return -ENOSYS;

        /*
         * Sanity checks.
         */

        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                drm_dbg(&dev_priv->drm,
                        "unsupported pixel format %p4cc / modifier 0x%llx\n",
                        &mode_cmd->pixel_format, mode_cmd->modifier[0]);
                return -EINVAL;
        }

        /* Surface must be marked as a scanout. */
        if (unlikely(!surface->metadata.scanout))
                return -EINVAL;

        if (unlikely(surface->metadata.mip_levels[0] != 1 ||
                     surface->metadata.num_sizes != 1 ||
                     surface->metadata.base_size.width < mode_cmd->width ||
                     surface->metadata.base_size.height < mode_cmd->height ||
                     surface->metadata.base_size.depth != 1)) {
                DRM_ERROR("Incompatible surface dimensions "
                          "for requested mode.\n");
                return -EINVAL;
        }

        switch (mode_cmd->pixel_format) {
        case DRM_FORMAT_ARGB8888:
                format = SVGA3D_A8R8G8B8;
                break;
        case DRM_FORMAT_XRGB8888:
                format = SVGA3D_X8R8G8B8;
                break;
        case DRM_FORMAT_RGB565:
                format = SVGA3D_R5G6B5;
                break;
        case DRM_FORMAT_XRGB1555:
                format = SVGA3D_A1R5G5B5;
                break;
        default:
                DRM_ERROR("Invalid pixel format: %p4cc\n",
                          &mode_cmd->pixel_format);
                return -EINVAL;
        }

        /*
         * For DX, surface format validation is done when surface->scanout
         * is set.
         */
        if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
                DRM_ERROR("Invalid surface format for requested mode.\n");
                return -EINVAL;
        }

        vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
        if (!vfbs) {
                ret = -ENOMEM;
                goto out_err1;
        }

        drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
        vfbs->surface = vmw_surface_reference(surface);
        vfbs->base.user_handle = mode_cmd->handles[0];
        vfbs->is_bo_proxy = is_bo_proxy;

        *out = &vfbs->base;

        ret = drm_framebuffer_init(dev, &vfbs->base.base,
                                   &vmw_framebuffer_surface_funcs);
        if (ret)
                goto out_err2;

        return 0;

out_err2:
        vmw_surface_unreference(&surface);
        kfree(vfbs);
out_err1:
        return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
                                            struct drm_file *file_priv,
                                            unsigned int *handle)
{
        struct vmw_framebuffer_bo *vfbd =
                        vmw_framebuffer_to_vfbd(fb);

        return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_bo *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);

        drm_framebuffer_cleanup(framebuffer);
        vmw_bo_unreference(&vfbd->buffer);

        kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
        .create_handle = vmw_framebuffer_bo_create_handle,
        .destroy = vmw_framebuffer_bo_destroy,
        .dirty = drm_atomic_helper_dirtyfb,
};

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to the
 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
                               const struct drm_mode_fb_cmd2 *mode_cmd,
                               struct vmw_bo *bo_mob,
                               struct vmw_surface **srf_out)
{
        struct vmw_surface_metadata metadata = {0};
        uint32_t format;
        struct vmw_resource *res;
        unsigned int bytes_pp;
        int ret;

        switch (mode_cmd->pixel_format) {
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB8888:
                format = SVGA3D_X8R8G8B8;
                bytes_pp = 4;
                break;

        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB1555:
                format = SVGA3D_R5G6B5;
                bytes_pp = 2;
                break;

        case DRM_FORMAT_C8: /* 8-bit palettized */
                format = SVGA3D_P8;
                bytes_pp = 1;
                break;

        default:
                DRM_ERROR("Invalid framebuffer format %p4cc\n",
                          &mode_cmd->pixel_format);
                return -EINVAL;
        }
1454
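        /*
         * Size the proxy surface from the framebuffer pitch rather than the
         * logical width, so it spans the entire stride of the backing store.
         */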
        metadata.format = format;
        metadata.mip_levels[0] = 1;
        metadata.num_sizes = 1;
        metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
        metadata.base_size.height = mode_cmd->height;
        metadata.base_size.depth = 1;
        metadata.scanout = true;

        ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
        if (ret) {
                DRM_ERROR("Failed to allocate proxy content buffer\n");
                return ret;
        }

        res = &(*srf_out)->res;

        /* Reserve and switch the backing mob. */
        mutex_lock(&res->dev_priv->cmdbuf_mutex);
        (void) vmw_resource_reserve(res, false, true);
        vmw_bo_unreference(&res->guest_memory_bo);
        res->guest_memory_bo = vmw_bo_reference(bo_mob);
        res->guest_memory_offset = 0;
        vmw_resource_unreserve(res, false, false, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);

        return 0;
}

static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
                                      struct vmw_bo *bo,
                                      struct vmw_framebuffer **out,
                                      const struct drm_mode_fb_cmd2
                                      *mode_cmd)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_framebuffer_bo *vfbd;
        unsigned int requested_size;
        int ret;

        requested_size = mode_cmd->height * mode_cmd->pitches[0];
        if (unlikely(requested_size > bo->tbo.base.size)) {
                DRM_ERROR("Screen buffer object size is too small "
                          "for requested mode.\n");
                return -EINVAL;
        }

        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                drm_dbg(&dev_priv->drm,
                        "unsupported pixel format %p4cc / modifier 0x%llx\n",
                        &mode_cmd->pixel_format, mode_cmd->modifier[0]);
                return -EINVAL;
        }

        vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
        if (!vfbd) {
                ret = -ENOMEM;
                goto out_err1;
        }

        vfbd->base.base.obj[0] = &bo->tbo.base;
        drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
        vfbd->base.bo = true;
        vfbd->buffer = vmw_bo_reference(bo);
        vfbd->base.user_handle = mode_cmd->handles[0];
        *out = &vfbd->base;

        ret = drm_framebuffer_init(dev, &vfbd->base.base,
                                   &vmw_framebuffer_bo_funcs);
        if (ret)
                goto out_err2;

        return 0;

out_err2:
        vmw_bo_unreference(&bo);
        kfree(vfbd);
out_err1:
        return ret;
}

/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be no larger than the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
        if (width > dev_priv->texture_max_width ||
            height > dev_priv->texture_max_height)
                return false;

        return true;
}
1559
1560 /**
1561  * vmw_kms_new_framebuffer - Create a new framebuffer.
1562  *
1563  * @dev_priv: Pointer to device private struct.
1564  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1565  * Either @bo or @surface must be NULL.
1566  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1567  * Either @bo or @surface must be NULL.
1568  * @only_2d: No presents will occur to this buffer object based framebuffer.
1569  * This helps the code to do some important optimizations.
1570  * @mode_cmd: Frame-buffer metadata.
1571  */
1572 struct vmw_framebuffer *
1573 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1574                         struct vmw_bo *bo,
1575                         struct vmw_surface *surface,
1576                         bool only_2d,
1577                         const struct drm_mode_fb_cmd2 *mode_cmd)
1578 {
1579         struct vmw_framebuffer *vfb = NULL;
1580         bool is_bo_proxy = false;
1581         int ret;
1582
1583         /*
1584          * We cannot use the SurfaceDMA command in a non-accelerated VM;
1585          * therefore, wrap the buffer object in a surface so we can use the
1586          * SurfaceCopy command.
1587          */
1588         if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1589             bo && only_2d &&
1590             mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1591             dev_priv->active_display_unit == vmw_du_screen_target) {
1592                 ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1593                                           bo, &surface);
1594                 if (ret)
1595                         return ERR_PTR(ret);
1596
1597                 is_bo_proxy = true;
1598         }
1599
1600         /* Create the new framebuffer depending on what we have */
1601         if (surface) {
1602                 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1603                                                       mode_cmd,
1604                                                       is_bo_proxy);
1605                 /*
1606                  * vmw_create_bo_proxy() adds a reference that is no longer
1607                  * needed
1608                  */
1609                 if (is_bo_proxy)
1610                         vmw_surface_unreference(&surface);
1611         } else if (bo) {
1612                 ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1613                                                  mode_cmd);
1614         } else {
1615                 BUG();
1616         }
1617
1618         if (ret)
1619                 return ERR_PTR(ret);
1620
1621         return vfb;
1622 }
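
/*
 * Minimal usage sketch for vmw_kms_new_framebuffer() (illustrative only,
 * not part of the driver; the caller is assumed to already hold references
 * on @bo or @surface, exactly as vmw_kms_fb_create() below does):
 *
 *	struct vmw_framebuffer *vfb;
 *
 *	vfb = vmw_kms_new_framebuffer(dev_priv, bo, NULL,
 *				      !(dev_priv->capabilities & SVGA_CAP_3D),
 *				      mode_cmd);
 *	if (IS_ERR(vfb))
 *		return PTR_ERR(vfb);
 *
 * Exactly one of @bo and @surface may be non-NULL; passing both as NULL
 * hits the BUG() above.
 */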
1623
1624 /*
1625  * Generic Kernel modesetting functions
1626  */
1627
1628 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1629                                                  struct drm_file *file_priv,
1630                                                  const struct drm_mode_fb_cmd2 *mode_cmd)
1631 {
1632         struct vmw_private *dev_priv = vmw_priv(dev);
1633         struct vmw_framebuffer *vfb = NULL;
1634         struct vmw_surface *surface = NULL;
1635         struct vmw_bo *bo = NULL;
1636         int ret;
1637
1638         /* Returns either a bo or a surface */
1639         ret = vmw_user_lookup_handle(dev_priv, file_priv,
1640                                      mode_cmd->handles[0],
1641                                      &surface, &bo);
1642         if (ret) {
1643                 DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1644                           mode_cmd->handles[0], mode_cmd->handles[0]);
1645                 goto err_out;
1646         }
1647
1648
1649         if (!bo &&
1650             !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1651                 DRM_ERROR("Surface size cannot exceed %dx%d\n",
1652                         dev_priv->texture_max_width,
1653                         dev_priv->texture_max_height);
1654                 goto err_out;
1655         }
1656
1657
1658         vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1659                                       !(dev_priv->capabilities & SVGA_CAP_3D),
1660                                       mode_cmd);
1661         if (IS_ERR(vfb)) {
1662                 ret = PTR_ERR(vfb);
1663                 goto err_out;
1664         }
1665
1666 err_out:
1667         /* vmw_user_lookup_handle() takes one ref, and so does new_fb */
1668         if (bo)
1669                 vmw_user_bo_unref(bo);
1670         if (surface)
1671                 vmw_surface_unreference(&surface);
1672
1673         if (ret) {
1674                 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1675                 return ERR_PTR(ret);
1676         }
1677
1678         return &vfb->base;
1679 }
1680
1681 /**
1682  * vmw_kms_check_display_memory - Validates display memory required for a
1683  * topology
1684  * @dev: DRM device
1685  * @num_rects: number of drm_rect in rects
1686  * @rects: array of drm_rect, indexed by crtc index, representing the
1687  * topology to validate.
1688  *
1689  * Returns:
1690  * 0 on success otherwise negative error code
1691  */
1692 static int vmw_kms_check_display_memory(struct drm_device *dev,
1693                                         uint32_t num_rects,
1694                                         struct drm_rect *rects)
1695 {
1696         struct vmw_private *dev_priv = vmw_priv(dev);
1697         struct drm_rect bounding_box = {0};
1698         u64 total_pixels = 0, pixel_mem, bb_mem;
1699         int i;
1700
1701         for (i = 0; i < num_rects; i++) {
1702                 /*
1703                  * For STDU only individual screen (screen target) is limited by
1704                  * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1705                  */
1706                 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1707                     (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1708                      drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1709                         VMW_DEBUG_KMS("Screen size not supported.\n");
1710                         return -EINVAL;
1711                 }
1712
1713                 /* Bounding box upper left is at (0,0). */
1714                 if (rects[i].x2 > bounding_box.x2)
1715                         bounding_box.x2 = rects[i].x2;
1716
1717                 if (rects[i].y2 > bounding_box.y2)
1718                         bounding_box.y2 = rects[i].y2;
1719
1720                 total_pixels += (u64) drm_rect_width(&rects[i]) *
1721                         (u64) drm_rect_height(&rects[i]);
1722         }
1723
1724         /* The virtual SVGA device's primary limits are always in 32-bpp. */
1725         pixel_mem = total_pixels * 4;
1726
1727         /*
1728          * For HV10 and below, max_primary_mem is the VRAM size. When
1729          * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
1730          * size is the limit on the primary bounding box.
1731          */
1732         if (pixel_mem > dev_priv->max_primary_mem) {
1733                 VMW_DEBUG_KMS("Combined output size too large.\n");
1734                 return -EINVAL;
1735         }
1736
1737         /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1738         if (dev_priv->active_display_unit != vmw_du_screen_target ||
1739             !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1740                 bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1741
1742                 if (bb_mem > dev_priv->max_primary_mem) {
1743                         VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1744                         return -EINVAL;
1745                 }
1746         }
1747
1748         return 0;
1749 }
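
/*
 * Worked example for the checks above (illustrative only, not part of the
 * driver): two 1920x1080 outputs placed side by side give
 *
 *	total_pixels = 2 * 1920 * 1080   = 4147200
 *	pixel_mem    = total_pixels * 4  = 16588800 bytes
 *	bounding box = 3840 x 1080
 *	bb_mem       = 3840 * 1080 * 4   = 16588800 bytes
 *
 * Both values must fit within dev_priv->max_primary_mem, unless the device
 * is a screen target with SVGA_CAP_NO_BB_RESTRICTION, in which case only
 * pixel_mem is checked.
 */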
1750
1751 /**
1752  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1753  * crtc mutex
1754  * @state: The atomic state pointer containing the new atomic state
1755  * @crtc: The crtc
1756  *
1757  * This function returns the new crtc state if it's part of the state update.
1758  * Otherwise returns the current crtc state. It also makes sure that the
1759  * crtc mutex is locked.
1760  *
1761  * Returns: A valid crtc state pointer or NULL. It may also return an
1762  * error pointer, in particular -EDEADLK if locking needs to be rerun.
1763  */
1764 static struct drm_crtc_state *
1765 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1766 {
1767         struct drm_crtc_state *crtc_state;
1768
1769         crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1770         if (crtc_state) {
1771                 lockdep_assert_held(&crtc->mutex.mutex.base);
1772         } else {
1773                 int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1774
1775                 if (ret != 0 && ret != -EALREADY)
1776                         return ERR_PTR(ret);
1777
1778                 crtc_state = crtc->state;
1779         }
1780
1781         return crtc_state;
1782 }
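
/*
 * Callers of vmw_crtc_state_and_lock() must be prepared for an -EDEADLK
 * error pointer and hand it back to the atomic core so locking can be
 * rerun. A minimal sketch of the expected pattern (illustrative only;
 * vmw_kms_check_implicit() and vmw_kms_check_topology() below do this for
 * real):
 *
 *	crtc_state = vmw_crtc_state_and_lock(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);	// may be -EDEADLK
 *	if (!crtc_state)
 *		continue;			// crtc not part of the state
 */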
1783
1784 /**
1785  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1786  * from the same fb after the new state is committed.
1787  * @dev: The drm_device.
1788  * @state: The new state to be checked.
1789  *
1790  * Returns:
1791  *   Zero on success,
1792  *   -EINVAL on invalid state,
1793  *   -EDEADLK if modeset locking needs to be rerun.
1794  */
1795 static int vmw_kms_check_implicit(struct drm_device *dev,
1796                                   struct drm_atomic_state *state)
1797 {
1798         struct drm_framebuffer *implicit_fb = NULL;
1799         struct drm_crtc *crtc;
1800         struct drm_crtc_state *crtc_state;
1801         struct drm_plane_state *plane_state;
1802
1803         drm_for_each_crtc(crtc, dev) {
1804                 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1805
1806                 if (!du->is_implicit)
1807                         continue;
1808
1809                 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1810                 if (IS_ERR(crtc_state))
1811                         return PTR_ERR(crtc_state);
1812
1813                 if (!crtc_state || !crtc_state->enable)
1814                         continue;
1815
1816                 /*
1817                  * Can't move primary planes across crtcs, so this is OK.
1818                  * It also means we don't need to take the plane mutex.
1819                  */
1820                 plane_state = du->primary.state;
1821                 if (plane_state->crtc != crtc)
1822                         continue;
1823
1824                 if (!implicit_fb)
1825                         implicit_fb = plane_state->fb;
1826                 else if (implicit_fb != plane_state->fb)
1827                         return -EINVAL;
1828         }
1829
1830         return 0;
1831 }
1832
1833 /**
1834  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1835  * @dev: DRM device
1836  * @state: the driver state object
1837  *
1838  * Returns:
1839  * 0 on success otherwise negative error code
1840  */
1841 static int vmw_kms_check_topology(struct drm_device *dev,
1842                                   struct drm_atomic_state *state)
1843 {
1844         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1845         struct drm_rect *rects;
1846         struct drm_crtc *crtc;
1847         uint32_t i;
1848         int ret = 0;
1849
1850         rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1851                         GFP_KERNEL);
1852         if (!rects)
1853                 return -ENOMEM;
1854
1855         drm_for_each_crtc(crtc, dev) {
1856                 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1857                 struct drm_crtc_state *crtc_state;
1858
1859                 i = drm_crtc_index(crtc);
1860
1861                 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1862                 if (IS_ERR(crtc_state)) {
1863                         ret = PTR_ERR(crtc_state);
1864                         goto clean;
1865                 }
1866
1867                 if (!crtc_state)
1868                         continue;
1869
1870                 if (crtc_state->enable) {
1871                         rects[i].x1 = du->gui_x;
1872                         rects[i].y1 = du->gui_y;
1873                         rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1874                         rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1875                 } else {
1876                         rects[i].x1 = 0;
1877                         rects[i].y1 = 0;
1878                         rects[i].x2 = 0;
1879                         rects[i].y2 = 0;
1880                 }
1881         }
1882
1883         /* Determine change to topology due to new atomic state */
1884         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1885                                       new_crtc_state, i) {
1886                 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1887                 struct drm_connector *connector;
1888                 struct drm_connector_state *conn_state;
1889                 struct vmw_connector_state *vmw_conn_state;
1890
1891                 if (!du->pref_active && new_crtc_state->enable) {
1892                         VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1893                         ret = -EINVAL;
1894                         goto clean;
1895                 }
1896
1897                 /*
1898                  * For vmwgfx each crtc has only one connector attached and
1899                  * it never changes, so there is no real need to check
1900                  * crtc->connector_mask and iterate over it.
1901                  */
1902                 connector = &du->connector;
1903                 conn_state = drm_atomic_get_connector_state(state, connector);
1904                 if (IS_ERR(conn_state)) {
1905                         ret = PTR_ERR(conn_state);
1906                         goto clean;
1907                 }
1908
1909                 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1910                 vmw_conn_state->gui_x = du->gui_x;
1911                 vmw_conn_state->gui_y = du->gui_y;
1912         }
1913
1914         ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1915                                            rects);
1916
1917 clean:
1918         kfree(rects);
1919         return ret;
1920 }
1921
1922 /**
1923  * vmw_kms_atomic_check_modeset - Validate state object for modeset changes
1924  *
1925  * @dev: DRM device
1926  * @state: the driver state object
1927  *
1928  * This is a simple wrapper around drm_atomic_helper_check() that lets
1929  * us assign a value to mode->crtc_clock so that
1930  * drm_calc_timestamping_constants() won't throw an error message.
1931  *
1932  * Returns:
1933  * Zero for success or -errno
1934  */
1935 static int
1936 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1937                              struct drm_atomic_state *state)
1938 {
1939         struct drm_crtc *crtc;
1940         struct drm_crtc_state *crtc_state;
1941         bool need_modeset = false;
1942         int i, ret;
1943
1944         ret = drm_atomic_helper_check(dev, state);
1945         if (ret)
1946                 return ret;
1947
1948         ret = vmw_kms_check_implicit(dev, state);
1949         if (ret) {
1950                 VMW_DEBUG_KMS("Invalid implicit state\n");
1951                 return ret;
1952         }
1953
1954         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1955                 if (drm_atomic_crtc_needs_modeset(crtc_state))
1956                         need_modeset = true;
1957         }
1958
1959         if (need_modeset)
1960                 return vmw_kms_check_topology(dev, state);
1961
1962         return ret;
1963 }
1964
1965 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1966         .fb_create = vmw_kms_fb_create,
1967         .atomic_check = vmw_kms_atomic_check_modeset,
1968         .atomic_commit = drm_atomic_helper_commit,
1969 };
1970
1971 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1972                                    struct drm_file *file_priv,
1973                                    struct vmw_framebuffer *vfb,
1974                                    struct vmw_surface *surface,
1975                                    uint32_t sid,
1976                                    int32_t destX, int32_t destY,
1977                                    struct drm_vmw_rect *clips,
1978                                    uint32_t num_clips)
1979 {
1980         return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1981                                             &surface->res, destX, destY,
1982                                             num_clips, 1, NULL, NULL);
1983 }
1984
1985
1986 int vmw_kms_present(struct vmw_private *dev_priv,
1987                     struct drm_file *file_priv,
1988                     struct vmw_framebuffer *vfb,
1989                     struct vmw_surface *surface,
1990                     uint32_t sid,
1991                     int32_t destX, int32_t destY,
1992                     struct drm_vmw_rect *clips,
1993                     uint32_t num_clips)
1994 {
1995         int ret;
1996
1997         switch (dev_priv->active_display_unit) {
1998         case vmw_du_screen_target:
1999                 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2000                                                  &surface->res, destX, destY,
2001                                                  num_clips, 1, NULL, NULL);
2002                 break;
2003         case vmw_du_screen_object:
2004                 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2005                                               sid, destX, destY, clips,
2006                                               num_clips);
2007                 break;
2008         default:
2009                 WARN_ONCE(true,
2010                           "Present called with invalid display system.\n");
2011                 ret = -ENOSYS;
2012                 break;
2013         }
2014         if (ret)
2015                 return ret;
2016
2017         vmw_cmd_flush(dev_priv, false);
2018
2019         return 0;
2020 }
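
/*
 * Example call into vmw_kms_present() (an illustrative sketch, not part of
 * the driver): present a single 640x480 region of @surface at the top-left
 * corner of the framebuffer.
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
 *	int ret;
 *
 *	ret = vmw_kms_present(dev_priv, file_priv, vfb, surface,
 *			      sid, 0, 0, &clip, 1);
 *
 * The helper dispatches on the active display unit and flushes the command
 * queue before returning.
 */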
2021
2022 static void
2023 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2024 {
2025         if (dev_priv->hotplug_mode_update_property)
2026                 return;
2027
2028         dev_priv->hotplug_mode_update_property =
2029                 drm_property_create_range(&dev_priv->drm,
2030                                           DRM_MODE_PROP_IMMUTABLE,
2031                                           "hotplug_mode_update", 0, 1);
2032 }
2033
2034 int vmw_kms_init(struct vmw_private *dev_priv)
2035 {
2036         struct drm_device *dev = &dev_priv->drm;
2037         int ret;
2038         static const char *display_unit_names[] = {
2039                 "Invalid",
2040                 "Legacy",
2041                 "Screen Object",
2042                 "Screen Target",
2043                 "Invalid (max)"
2044         };
2045
2046         drm_mode_config_init(dev);
2047         dev->mode_config.funcs = &vmw_kms_funcs;
2048         dev->mode_config.min_width = 1;
2049         dev->mode_config.min_height = 1;
2050         dev->mode_config.max_width = dev_priv->texture_max_width;
2051         dev->mode_config.max_height = dev_priv->texture_max_height;
2052         dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2053
2054         drm_mode_create_suggested_offset_properties(dev);
2055         vmw_kms_create_hotplug_mode_update_property(dev_priv);
2056
2057         ret = vmw_kms_stdu_init_display(dev_priv);
2058         if (ret) {
2059                 ret = vmw_kms_sou_init_display(dev_priv);
2060                 if (ret) /* Fallback */
2061                         ret = vmw_kms_ldu_init_display(dev_priv);
2062         }
2063         BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2064         drm_info(&dev_priv->drm, "%s display unit initialized\n",
2065                  display_unit_names[dev_priv->active_display_unit]);
2066
2067         return ret;
2068 }
2069
2070 int vmw_kms_close(struct vmw_private *dev_priv)
2071 {
2072         int ret = 0;
2073
2074         /*
2075          * The docs say we should take the lock before calling this function,
2076          * but since it destroys encoders and our destructor calls
2077          * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2078          */
2079         drm_mode_config_cleanup(&dev_priv->drm);
2080         if (dev_priv->active_display_unit == vmw_du_legacy)
2081                 ret = vmw_kms_ldu_close_display(dev_priv);
2082
2083         return ret;
2084 }
2085
2086 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2087                                 struct drm_file *file_priv)
2088 {
2089         struct drm_vmw_cursor_bypass_arg *arg = data;
2090         struct vmw_display_unit *du;
2091         struct drm_crtc *crtc;
2092         int ret = 0;
2093
2094         mutex_lock(&dev->mode_config.mutex);
2095         if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2096
2097                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2098                         du = vmw_crtc_to_du(crtc);
2099                         du->hotspot_x = arg->xhot;
2100                         du->hotspot_y = arg->yhot;
2101                 }
2102
2103                 mutex_unlock(&dev->mode_config.mutex);
2104                 return 0;
2105         }
2106
2107         crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2108         if (!crtc) {
2109                 ret = -ENOENT;
2110                 goto out;
2111         }
2112
2113         du = vmw_crtc_to_du(crtc);
2114
2115         du->hotspot_x = arg->xhot;
2116         du->hotspot_y = arg->yhot;
2117
2118 out:
2119         mutex_unlock(&dev->mode_config.mutex);
2120
2121         return ret;
2122 }
2123
2124 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2125                         unsigned width, unsigned height, unsigned pitch,
2126                         unsigned bpp, unsigned depth)
2127 {
2128         if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2129                 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2130         else if (vmw_fifo_have_pitchlock(vmw_priv))
2131                 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2132         vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2133         vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2134         if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2135                 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2136
2137         if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2138                 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2139                           depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2140                 return -EINVAL;
2141         }
2142
2143         return 0;
2144 }
2145
2146 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2147                                 uint32_t pitch,
2148                                 uint32_t height)
2149 {
2150         return ((u64) pitch * (u64) height) < (u64)
2151                 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
2152                  dev_priv->max_primary_mem : dev_priv->vram_size);
2153 }
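
/*
 * Worked example (illustrative only, not part of the driver): for a
 * 2560x1440 mode assuming 4 bytes per pixel, the caller passes
 * pitch = 2560 * 4 = 10240, so the mode needs
 *
 *	10240 * 1440 = 14745600 bytes (~14 MiB)
 *
 * which must be strictly below max_primary_mem (screen targets) or
 * vram_size (other display units) for the mode to validate.
 */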
2154
2155 /**
2156  * vmw_du_update_layout - Update the display unit with topology from resolution
2157  * plugin and generate DRM uevent
2158  * @dev_priv: device private
2159  * @num_rects: number of drm_rect in rects
2160  * @rects: topology to update
2161  */
2162 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2163                                 unsigned int num_rects, struct drm_rect *rects)
2164 {
2165         struct drm_device *dev = &dev_priv->drm;
2166         struct vmw_display_unit *du;
2167         struct drm_connector *con;
2168         struct drm_connector_list_iter conn_iter;
2169         struct drm_modeset_acquire_ctx ctx;
2170         struct drm_crtc *crtc;
2171         int ret;
2172
2173         /* Currently gui_x/y are protected by the crtc mutex */
2174         mutex_lock(&dev->mode_config.mutex);
2175         drm_modeset_acquire_init(&ctx, 0);
2176 retry:
2177         drm_for_each_crtc(crtc, dev) {
2178                 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2179                 if (ret < 0) {
2180                         if (ret == -EDEADLK) {
2181                                 drm_modeset_backoff(&ctx);
2182                                 goto retry;
2183                         }
2184                         goto out_fini;
2185                 }
2186         }
2187
2188         drm_connector_list_iter_begin(dev, &conn_iter);
2189         drm_for_each_connector_iter(con, &conn_iter) {
2190                 du = vmw_connector_to_du(con);
2191                 if (num_rects > du->unit) {
2192                         du->pref_width = drm_rect_width(&rects[du->unit]);
2193                         du->pref_height = drm_rect_height(&rects[du->unit]);
2194                         du->pref_active = true;
2195                         du->gui_x = rects[du->unit].x1;
2196                         du->gui_y = rects[du->unit].y1;
2197                 } else {
2198                         du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2199                         du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2200                         du->pref_active = false;
2201                         du->gui_x = 0;
2202                         du->gui_y = 0;
2203                 }
2204         }
2205         drm_connector_list_iter_end(&conn_iter);
2206
2207         list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2208                 du = vmw_connector_to_du(con);
2209                 if (num_rects > du->unit) {
2210                         drm_object_property_set_value
2211                           (&con->base, dev->mode_config.suggested_x_property,
2212                            du->gui_x);
2213                         drm_object_property_set_value
2214                           (&con->base, dev->mode_config.suggested_y_property,
2215                            du->gui_y);
2216                 } else {
2217                         drm_object_property_set_value
2218                           (&con->base, dev->mode_config.suggested_x_property,
2219                            0);
2220                         drm_object_property_set_value
2221                           (&con->base, dev->mode_config.suggested_y_property,
2222                            0);
2223                 }
2224                 con->status = vmw_du_connector_detect(con, true);
2225         }
2226 out_fini:
2227         drm_modeset_drop_locks(&ctx);
2228         drm_modeset_acquire_fini(&ctx);
2229         mutex_unlock(&dev->mode_config.mutex);
2230
2231         drm_sysfs_hotplug_event(dev);
2232
2233         return 0;
2234 }
2235
2236 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2237                           u16 *r, u16 *g, u16 *b,
2238                           uint32_t size,
2239                           struct drm_modeset_acquire_ctx *ctx)
2240 {
2241         struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2242         int i;
2243
2244         for (i = 0; i < size; i++) {
2245                 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2246                           r[i], g[i], b[i]);
2247                 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2248                 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2249                 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2250         }
2251
2252         return 0;
2253 }
2254
2255 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2256 {
2257         return 0;
2258 }
2259
2260 enum drm_connector_status
2261 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2262 {
2263         uint32_t num_displays;
2264         struct drm_device *dev = connector->dev;
2265         struct vmw_private *dev_priv = vmw_priv(dev);
2266         struct vmw_display_unit *du = vmw_connector_to_du(connector);
2267
2268         num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2269
2270         return ((vmw_connector_to_du(connector)->unit < num_displays &&
2271                  du->pref_active) ?
2272                 connector_status_connected : connector_status_disconnected);
2273 }
2274
2275 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2276         /* 640x480@60Hz */
2277         { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2278                    752, 800, 0, 480, 489, 492, 525, 0,
2279                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2280         /* 800x600@60Hz */
2281         { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2282                    968, 1056, 0, 600, 601, 605, 628, 0,
2283                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2284         /* 1024x768@60Hz */
2285         { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2286                    1184, 1344, 0, 768, 771, 777, 806, 0,
2287                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2288         /* 1152x864@75Hz */
2289         { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2290                    1344, 1600, 0, 864, 865, 868, 900, 0,
2291                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2292         /* 1280x720@60Hz */
2293         { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2294                    1472, 1664, 0, 720, 723, 728, 748, 0,
2295                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2296         /* 1280x768@60Hz */
2297         { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2298                    1472, 1664, 0, 768, 771, 778, 798, 0,
2299                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2300         /* 1280x800@60Hz */
2301         { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2302                    1480, 1680, 0, 800, 803, 809, 831, 0,
2303                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2304         /* 1280x960@60Hz */
2305         { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2306                    1488, 1800, 0, 960, 961, 964, 1000, 0,
2307                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2308         /* 1280x1024@60Hz */
2309         { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2310                    1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2311                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2312         /* 1360x768@60Hz */
2313         { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2314                    1536, 1792, 0, 768, 771, 777, 795, 0,
2315                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2316         /* 1400x1050@60Hz */
2317         { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2318                    1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2319                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2320         /* 1440x900@60Hz */
2321         { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2322                    1672, 1904, 0, 900, 903, 909, 934, 0,
2323                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2324         /* 1600x1200@60Hz */
2325         { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2326                    1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2327                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2328         /* 1680x1050@60Hz */
2329         { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2330                    1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2331                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2332         /* 1792x1344@60Hz */
2333         { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2334                    2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2335                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2336         /* 1856x1392@60Hz */
2337         { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2338                    2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2339                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2340         /* 1920x1080@60Hz */
2341         { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2342                    2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2343                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2344         /* 1920x1200@60Hz */
2345         { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2346                    2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2347                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2348         /* 1920x1440@60Hz */
2349         { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2350                    2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2351                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2352         /* 2560x1440@60Hz */
2353         { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2354                    2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2355                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2356         /* 2560x1600@60Hz */
2357         { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2358                    3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2359                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2360         /* 2880x1800@60Hz */
2361         { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2362                    2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2363                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2364         /* 3840x2160@60Hz */
2365         { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2366                    3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2367                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2368         /* 3840x2400@60Hz */
2369         { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2370                    3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2371                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2372         /* Terminate */
2373         { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2374 };
2375
2376 /**
2377  * vmw_guess_mode_timing - Provide fake timings for a
2378  * 60Hz vrefresh mode.
2379  *
2380  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2381  * members filled in.
2382  */
2383 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2384 {
2385         mode->hsync_start = mode->hdisplay + 50;
2386         mode->hsync_end = mode->hsync_start + 50;
2387         mode->htotal = mode->hsync_end + 50;
2388
2389         mode->vsync_start = mode->vdisplay + 50;
2390         mode->vsync_end = mode->vsync_start + 50;
2391         mode->vtotal = mode->vsync_end + 50;
2392
2393         mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2394 }
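
/*
 * Worked example (illustrative only, not part of the driver): for
 * hdisplay = 1024 and vdisplay = 768 the guessed timings are
 * htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918, giving
 *
 *	clock = 1174 * 918 / 100 * 6 = 64662 (kHz)
 *
 * so the resulting vrefresh is 64662000 / (1174 * 918) ~= 60 Hz, which is
 * what these fake timings are designed to approximate.
 */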
2395
2396
2397 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2398                                 uint32_t max_width, uint32_t max_height)
2399 {
2400         struct vmw_display_unit *du = vmw_connector_to_du(connector);
2401         struct drm_device *dev = connector->dev;
2402         struct vmw_private *dev_priv = vmw_priv(dev);
2403         struct drm_display_mode *mode = NULL;
2404         struct drm_display_mode *bmode;
2405         struct drm_display_mode prefmode = { DRM_MODE("preferred",
2406                 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2407                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2408                 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2409         };
2410         int i;
2411         u32 assumed_bpp = 4;
2412
2413         if (dev_priv->assume_16bpp)
2414                 assumed_bpp = 2;
2415
2416         max_width  = min(max_width,  dev_priv->texture_max_width);
2417         max_height = min(max_height, dev_priv->texture_max_height);
2418
2419         /*
2420          * For STDU, the SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers
2421          * impose an extra limit on the mode size.
2422          */
2423         if (dev_priv->active_display_unit == vmw_du_screen_target) {
2424                 max_width  = min(max_width,  dev_priv->stdu_max_width);
2425                 max_height = min(max_height, dev_priv->stdu_max_height);
2426         }
2427
2428         /* Add preferred mode */
2429         mode = drm_mode_duplicate(dev, &prefmode);
2430         if (!mode)
2431                 return 0;
2432         mode->hdisplay = du->pref_width;
2433         mode->vdisplay = du->pref_height;
2434         vmw_guess_mode_timing(mode);
2435         drm_mode_set_name(mode);
2436
2437         if (vmw_kms_validate_mode_vram(dev_priv,
2438                                         mode->hdisplay * assumed_bpp,
2439                                         mode->vdisplay)) {
2440                 drm_mode_probed_add(connector, mode);
2441         } else {
2442                 drm_mode_destroy(dev, mode);
2443                 mode = NULL;
2444         }
2445
2446         if (du->pref_mode) {
2447                 list_del_init(&du->pref_mode->head);
2448                 drm_mode_destroy(dev, du->pref_mode);
2449         }
2450
2451         /* mode might be NULL here; this is intended */
2452         du->pref_mode = mode;
2453
2454         for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2455                 bmode = &vmw_kms_connector_builtin[i];
2456                 if (bmode->hdisplay > max_width ||
2457                     bmode->vdisplay > max_height)
2458                         continue;
2459
2460                 if (!vmw_kms_validate_mode_vram(dev_priv,
2461                                                 bmode->hdisplay * assumed_bpp,
2462                                                 bmode->vdisplay))
2463                         continue;
2464
2465                 mode = drm_mode_duplicate(dev, bmode);
2466                 if (!mode)
2467                         return 0;
2468
2469                 drm_mode_probed_add(connector, mode);
2470         }
2471
2472         drm_connector_list_update(connector);
2473         /* Move the preferred mode first, to help apps pick the right mode. */
2474         drm_mode_sort(&connector->modes);
2475
2476         return 1;
2477 }
2478
2479 /**
2480  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2481  * @dev: drm device for the ioctl
2482  * @data: data pointer for the ioctl
2483  * @file_priv: drm file for the ioctl call
2484  *
2485  * Update preferred topology of display unit as per ioctl request. The topology
2486  * is expressed as array of drm_vmw_rect.
2487  * e.g.
2488  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2489  *
2490  * NOTE:
2491  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2492  * Besides the device limit on topology, x + w and y + h (lower right) cannot
2493  * be greater than INT_MAX. A topology beyond these limits will return an error.
2494  *
2495  * Returns:
2496  * Zero on success, negative errno on failure.
2497  */
2498 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2499                                 struct drm_file *file_priv)
2500 {
2501         struct vmw_private *dev_priv = vmw_priv(dev);
2502         struct drm_mode_config *mode_config = &dev->mode_config;
2503         struct drm_vmw_update_layout_arg *arg =
2504                 (struct drm_vmw_update_layout_arg *)data;
2505         void __user *user_rects;
2506         struct drm_vmw_rect *rects;
2507         struct drm_rect *drm_rects;
2508         unsigned rects_size;
2509         int ret, i;
2510
2511         if (!arg->num_outputs) {
2512                 struct drm_rect def_rect = {0, 0,
2513                                             VMWGFX_MIN_INITIAL_WIDTH,
2514                                             VMWGFX_MIN_INITIAL_HEIGHT};
2515                 vmw_du_update_layout(dev_priv, 1, &def_rect);
2516                 return 0;
2517         }
2518
2519         rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2520         rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2521                         GFP_KERNEL);
2522         if (unlikely(!rects))
2523                 return -ENOMEM;
2524
2525         user_rects = (void __user *)(unsigned long)arg->rects;
2526         ret = copy_from_user(rects, user_rects, rects_size);
2527         if (unlikely(ret != 0)) {
2528                 DRM_ERROR("Failed to get rects.\n");
2529                 ret = -EFAULT;
2530                 goto out_free;
2531         }
2532
2533         drm_rects = (struct drm_rect *)rects;
2534
2535         VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2536         for (i = 0; i < arg->num_outputs; i++) {
2537                 struct drm_vmw_rect curr_rect;
2538
2539                 /* Verify user-space input for overflow, as the kernel uses drm_rect */
2540                 if ((rects[i].x + rects[i].w > INT_MAX) ||
2541                     (rects[i].y + rects[i].h > INT_MAX)) {
2542                         ret = -ERANGE;
2543                         goto out_free;
2544                 }
2545
2546                 curr_rect = rects[i];
2547                 drm_rects[i].x1 = curr_rect.x;
2548                 drm_rects[i].y1 = curr_rect.y;
2549                 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2550                 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2551
2552                 VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2553                               drm_rects[i].x1, drm_rects[i].y1,
2554                               drm_rects[i].x2, drm_rects[i].y2);
2555
2556                 /*
2557                  * Currently this check limits the topology to within
2558                  * mode_config->max (which actually is the max texture size
2559                  * supported by the virtual device). This limit is here to
2560                  * address window managers that create a big framebuffer for
2561                  * the whole topology.
2562                  */
2563                 if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2564                     drm_rects[i].x2 > mode_config->max_width ||
2565                     drm_rects[i].y2 > mode_config->max_height) {
2566                         VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2567                                       drm_rects[i].x1, drm_rects[i].y1,
2568                                       drm_rects[i].x2, drm_rects[i].y2);
2569                         ret = -EINVAL;
2570                         goto out_free;
2571                 }
2572         }
2573
2574         ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2575
2576         if (ret == 0)
2577                 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2578
2579 out_free:
2580         kfree(rects);
2581         return ret;
2582 }
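
/*
 * Example of the rect conversion performed above (illustrative only, not
 * part of the driver): a user-space request for two 1920x1080 outputs side
 * by side,
 *
 *	rects[0] = { .x = 0,    .y = 0, .w = 1920, .h = 1080 }
 *	rects[1] = { .x = 1920, .y = 0, .w = 1920, .h = 1080 }
 *
 * is converted in place to the drm_rect form
 *
 *	drm_rects[0] = { .x1 = 0,    .y1 = 0, .x2 = 1920, .y2 = 1080 }
 *	drm_rects[1] = { .x1 = 1920, .y1 = 0, .x2 = 3840, .y2 = 1080 }
 *
 * before being checked against display memory limits and applied.
 */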
2583
2584 /**
2585  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2586  * on a set of cliprects and a set of display units.
2587  *
2588  * @dev_priv: Pointer to a device private structure.
2589  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2590  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2591  * Cliprects are given in framebuffer coordinates.
2592  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2593  * be NULL. Cliprects are given in source coordinates.
2594  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2595  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2596  * @num_clips: Number of cliprects in the @clips or @vclips array.
2597  * @increment: Integer with which to increment the clip counter when looping.
2598  * Used to skip a predetermined number of clip rects.
2599  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2600  */
2601 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2602                          struct vmw_framebuffer *framebuffer,
2603                          const struct drm_clip_rect *clips,
2604                          const struct drm_vmw_rect *vclips,
2605                          s32 dest_x, s32 dest_y,
2606                          int num_clips,
2607                          int increment,
2608                          struct vmw_kms_dirty *dirty)
2609 {
2610         struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2611         struct drm_crtc *crtc;
2612         u32 num_units = 0;
2613         u32 i, k;
2614
2615         dirty->dev_priv = dev_priv;
2616
2617         /* If crtc is passed, no need to iterate over other display units */
2618         if (dirty->crtc) {
2619                 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2620         } else {
2621                 list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2622                                     head) {
2623                         struct drm_plane *plane = crtc->primary;
2624
2625                         if (plane->state->fb == &framebuffer->base)
2626                                 units[num_units++] = vmw_crtc_to_du(crtc);
2627                 }
2628         }
2629
2630         for (k = 0; k < num_units; k++) {
2631                 struct vmw_display_unit *unit = units[k];
2632                 s32 crtc_x = unit->crtc.x;
2633                 s32 crtc_y = unit->crtc.y;
2634                 s32 crtc_width = unit->crtc.mode.hdisplay;
2635                 s32 crtc_height = unit->crtc.mode.vdisplay;
2636                 const struct drm_clip_rect *clips_ptr = clips;
2637                 const struct drm_vmw_rect *vclips_ptr = vclips;
2638
2639                 dirty->unit = unit;
2640                 if (dirty->fifo_reserve_size > 0) {
2641                         dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2642                                                       dirty->fifo_reserve_size);
2643                         if (!dirty->cmd)
2644                                 return -ENOMEM;
2645
2646                         memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2647                 }
2648                 dirty->num_hits = 0;
2649                 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2650                        vclips_ptr += increment) {
2651                         s32 clip_left;
2652                         s32 clip_top;
2653
2654                         /*
2655                          * Select clip array type. Note that integer type
2656                          * in @clips is unsigned short, whereas in @vclips
2657                          * it's 32-bit.
2658                          */
2659                         if (clips) {
2660                                 dirty->fb_x = (s32) clips_ptr->x1;
2661                                 dirty->fb_y = (s32) clips_ptr->y1;
2662                                 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2663                                         crtc_x;
2664                                 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2665                                         crtc_y;
2666                         } else {
2667                                 dirty->fb_x = vclips_ptr->x;
2668                                 dirty->fb_y = vclips_ptr->y;
2669                                 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2670                                         dest_x - crtc_x;
2671                                 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2672                                         dest_y - crtc_y;
2673                         }
2674
2675                         dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2676                         dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2677
2678                         /* Skip this clip if it's outside the crtc region */
2679                         if (dirty->unit_x1 >= crtc_width ||
2680                             dirty->unit_y1 >= crtc_height ||
2681                             dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2682                                 continue;
2683
2684                         /* Clip right and bottom to crtc limits */
2685                         dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2686                                                crtc_width);
2687                         dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2688                                                crtc_height);
2689
2690                         /* Clip left and top to crtc limits */
2691                         clip_left = min_t(s32, dirty->unit_x1, 0);
2692                         clip_top = min_t(s32, dirty->unit_y1, 0);
2693                         dirty->unit_x1 -= clip_left;
2694                         dirty->unit_y1 -= clip_top;
2695                         dirty->fb_x -= clip_left;
2696                         dirty->fb_y -= clip_top;
2697
2698                         dirty->clip(dirty);
2699                 }
2700
2701                 dirty->fifo_commit(dirty);
2702         }
2703
2704         return 0;
2705 }
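
/*
 * Worked example of the coordinate transform above (illustrative only, not
 * part of the driver): with a crtc scanning out at (1920, 0) in a 1920x1080
 * mode, dest_x = dest_y = 0 and a framebuffer cliprect (2000, 100)-(2100, 200):
 *
 *	unit_x1 = 2000 + 0 - 1920 = 80     unit_y1 = 100
 *	unit_x2 = 2100 + 0 - 1920 = 180    unit_y2 = 200
 *
 * The clip lands inside the crtc region, so dirty->clip() is called with
 * those unit-relative coordinates. A clip lying entirely left of x = 1920
 * would produce unit_x2 <= 0 and be skipped for this unit.
 */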
2706
2707 /**
2708  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2709  * cleanup and fencing
2710  * @dev_priv: Pointer to the device-private struct
2711  * @file_priv: Pointer identifying the client when user-space fencing is used
2712  * @ctx: Pointer to the validation context
2713  * @out_fence: If non-NULL, returned refcounted fence-pointer
2714  * @user_fence_rep: If non-NULL, pointer to user-space address area
2715  * in which to copy user-space fence info
2716  */
2717 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2718                                       struct drm_file *file_priv,
2719                                       struct vmw_validation_context *ctx,
2720                                       struct vmw_fence_obj **out_fence,
2721                                       struct drm_vmw_fence_rep __user *
2722                                       user_fence_rep)
2723 {
2724         struct vmw_fence_obj *fence = NULL;
2725         uint32_t handle = 0;
2726         int ret = 0;
2727
2728         if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2729             out_fence)
2730                 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2731                                                  file_priv ? &handle : NULL);
2732         vmw_validation_done(ctx, fence);
2733         if (file_priv)
2734                 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2735                                             ret, user_fence_rep, fence,
2736                                             handle, -1);
2737         if (out_fence)
2738                 *out_fence = fence;
2739         else
2740                 vmw_fence_obj_unreference(&fence);
2741 }
2742
2743 /**
2744  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2745  * its backing MOB.
2746  *
2747  * @res: Pointer to the surface resource
2748  * @clips: Clip rects in framebuffer (surface) space.
2749  * @num_clips: Number of clips in @clips.
2750  * @increment: Integer with which to increment the clip counter when looping.
2751  * Used to skip a predetermined number of clip rects.
2752  *
2753  * This function makes sure the proxy surface is updated from its backing MOB
2754  * using the region given by @clips. The surface resource @res and its backing
2755  * MOB need to be reserved and validated on call.
2756  */
2757 int vmw_kms_update_proxy(struct vmw_resource *res,
2758                          const struct drm_clip_rect *clips,
2759                          unsigned num_clips,
2760                          int increment)
2761 {
2762         struct vmw_private *dev_priv = res->dev_priv;
2763         struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2764         struct {
2765                 SVGA3dCmdHeader header;
2766                 SVGA3dCmdUpdateGBImage body;
2767         } *cmd;
2768         SVGA3dBox *box;
2769         size_t copy_size = 0;
2770         int i;
2771
2772         if (!clips)
2773                 return 0;
2774
2775         cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2776         if (!cmd)
2777                 return -ENOMEM;
2778
2779         for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2780                 box = &cmd->body.box;
2781
2782                 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2783                 cmd->header.size = sizeof(cmd->body);
2784                 cmd->body.image.sid = res->id;
2785                 cmd->body.image.face = 0;
2786                 cmd->body.image.mipmap = 0;
2787
2788                 if (clips->x1 > size->width || clips->x2 > size->width ||
2789                     clips->y1 > size->height || clips->y2 > size->height) {
2790                         DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2791                         return -EINVAL;
2792                 }
2793
2794                 box->x = clips->x1;
2795                 box->y = clips->y1;
2796                 box->z = 0;
2797                 box->w = clips->x2 - clips->x1;
2798                 box->h = clips->y2 - clips->y1;
2799                 box->d = 1;
2800
2801                 copy_size += sizeof(*cmd);
2802         }
2803
2804         vmw_cmd_commit(dev_priv, copy_size);
2805
2806         return 0;
2807 }
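
/*
 * Example of the clip-to-box conversion above (illustrative only, not part
 * of the driver): a clip rect (16, 32)-(116, 96) becomes the SVGA3dBox
 *
 *	box->x = 16,  box->y = 32, box->z = 0
 *	box->w = 100, box->h = 64, box->d = 1
 *
 * and one SVGA_3D_CMD_UPDATE_GB_IMAGE command is emitted per clip, which is
 * why the FIFO reservation above is sizeof(*cmd) * num_clips.
 */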
2808
2809 /**
2810  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2811  * property.
2812  *
2813  * @dev_priv: Pointer to a device private struct.
2814  *
2815  * Sets up the implicit placement property unless it's already set up.
2816  */
2817 void
2818 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2819 {
2820         if (dev_priv->implicit_placement_property)
2821                 return;
2822
2823         dev_priv->implicit_placement_property =
2824                 drm_property_create_range(&dev_priv->drm,
2825                                           DRM_MODE_PROP_IMMUTABLE,
2826                                           "implicit_placement", 0, 1);
2827 }
2828
2829 /**
2830  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2831  *
2832  * @dev: Pointer to the drm device
2833  * Return: 0 on success. Negative error code on failure.
2834  */
2835 int vmw_kms_suspend(struct drm_device *dev)
2836 {
2837         struct vmw_private *dev_priv = vmw_priv(dev);
2838
2839         dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2840         if (IS_ERR(dev_priv->suspend_state)) {
2841                 int ret = PTR_ERR(dev_priv->suspend_state);
2842
2843                 DRM_ERROR("Failed kms suspend: %d\n", ret);
2844                 dev_priv->suspend_state = NULL;
2845
2846                 return ret;
2847         }
2848
2849         return 0;
2850 }
2851
2852
2853 /**
2854  * vmw_kms_resume - Re-enable modesetting and restore state
2855  *
2856  * @dev: Pointer to the drm device
2857  * Return: 0 on success. Negative error code on failure.
2858  *
2859  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2860  * to call this function without a previous vmw_kms_suspend().
2861  */
2862 int vmw_kms_resume(struct drm_device *dev)
2863 {
2864         struct vmw_private *dev_priv = vmw_priv(dev);
2865         int ret;
2866
2867         if (WARN_ON(!dev_priv->suspend_state))
2868                 return 0;
2869
2870         ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2871         dev_priv->suspend_state = NULL;
2872
2873         return ret;
2874 }
2875
2876 /**
2877  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2878  *
2879  * @dev: Pointer to the drm device
2880  */
2881 void vmw_kms_lost_device(struct drm_device *dev)
2882 {
2883         drm_atomic_helper_shutdown(dev);
2884 }
2885
2886 /**
2887  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2888  * @update: The closure structure.
2889  *
2890  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2891  * update on display unit.
2892  *
2893  * Return: 0 on success or a negative error code on failure.
2894  */
2895 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2896 {
2897         struct drm_plane_state *state = update->plane->state;
2898         struct drm_plane_state *old_state = update->old_state;
2899         struct drm_atomic_helper_damage_iter iter;
2900         struct drm_rect clip;
2901         struct drm_rect bb;
2902         DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2903         uint32_t reserved_size = 0;
2904         uint32_t submit_size = 0;
2905         uint32_t curr_size = 0;
2906         uint32_t num_hits = 0;
2907         void *cmd_start;
2908         char *cmd_next;
2909         int ret;
2910
2911         /*
2912          * Iterate in advance to check whether a plane update is really needed and
2913          * to find the number of clips actually inside the plane src, for fifo allocation.
2914          */
2915         drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2916         drm_atomic_for_each_plane_damage(&iter, &clip)
2917                 num_hits++;
2918
2919         if (num_hits == 0)
2920                 return 0;
2921
2922         if (update->vfb->bo) {
2923                 struct vmw_framebuffer_bo *vfbbo =
2924                         container_of(update->vfb, typeof(*vfbbo), base);
2925
2926                 /*
2927                  * For screen targets we want a mappable bo; for everything else we want
2928                  * an accelerated, i.e. host backed (vram or gmr), bo. If the display
2929                  * unit is not a screen target then mobs shouldn't be available.
2930                  */
2931                 if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2932                         vmw_bo_placement_set(vfbbo->buffer,
2933                                              VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2934                                              VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2935                 } else {
2936                         WARN_ON(update->dev_priv->has_mob);
2937                         vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2938                 }
2939                 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2940         } else {
2941                 struct vmw_framebuffer_surface *vfbs =
2942                         container_of(update->vfb, typeof(*vfbs), base);
2943
2944                 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2945                                                   0, VMW_RES_DIRTY_NONE, NULL,
2946                                                   NULL);
2947         }
2948
2949         if (ret)
2950                 return ret;
2951
2952         ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2953         if (ret)
2954                 goto out_unref;
2955
2956         reserved_size = update->calc_fifo_size(update, num_hits);
2957         cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2958         if (!cmd_start) {
2959                 ret = -ENOMEM;
2960                 goto out_revert;
2961         }
2962
2963         cmd_next = cmd_start;
2964
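        /*
         * Each optional stage below appends its commands at cmd_next and
         * returns the number of bytes it wrote, advancing the cursor and
         * growing the final submit size.
         */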
2965         if (update->post_prepare) {
2966                 curr_size = update->post_prepare(update, cmd_next);
2967                 cmd_next += curr_size;
2968                 submit_size += curr_size;
2969         }
2970
2971         if (update->pre_clip) {
2972                 curr_size = update->pre_clip(update, cmd_next, num_hits);
2973                 cmd_next += curr_size;
2974                 submit_size += curr_size;
2975         }
2976
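        /*
         * Start with an inverted (empty) bounding box; each damage clip
         * below grows it to cover the full updated region.
         */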
2977         bb.x1 = INT_MAX;
2978         bb.y1 = INT_MAX;
2979         bb.x2 = INT_MIN;
2980         bb.y2 = INT_MIN;
2981
2982         drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2983         drm_atomic_for_each_plane_damage(&iter, &clip) {
2984                 uint32_t fb_x = clip.x1;
2985                 uint32_t fb_y = clip.y1;
2986
2987                 vmw_du_translate_to_crtc(state, &clip);
2988                 if (update->clip) {
2989                         curr_size = update->clip(update, cmd_next, &clip, fb_x,
2990                                                  fb_y);
2991                         cmd_next += curr_size;
2992                         submit_size += curr_size;
2993                 }
2994                 bb.x1 = min_t(int, bb.x1, clip.x1);
2995                 bb.y1 = min_t(int, bb.y1, clip.y1);
2996                 bb.x2 = max_t(int, bb.x2, clip.x2);
2997                 bb.y2 = max_t(int, bb.y2, clip.y2);
2998         }
2999
3000         curr_size = update->post_clip(update, cmd_next, &bb);
3001         submit_size += curr_size;
3002
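        /*
         * If the callbacks produced more data than was reserved, drop the
         * commit (commit zero bytes) rather than overrun the reservation.
         */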
3003         if (reserved_size < submit_size)
3004                 submit_size = 0;
3005
3006         vmw_cmd_commit(update->dev_priv, submit_size);
3007
3008         vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3009                                          update->out_fence, NULL);
3010         return ret;
3011
3012 out_revert:
3013         vmw_validation_revert(&val_ctx);
3014
3015 out_unref:
3016         vmw_validation_unref_lists(&val_ctx);
3017         return ret;
3018 }
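
/*
 * A hedged usage sketch for vmw_du_helper_plane_update(): a display unit
 * fills in the &vmw_du_update_plane closure and calls the helper. The
 * du_foo_* callbacks are hypothetical stand-ins for a unit's real
 * implementations; only fields the helper actually consumes are shown:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vfb,
 *		.mutex = NULL,
 *		.intr = true,
 *		.out_fence = NULL,
 *		.calc_fifo_size = du_foo_calc_fifo_size,
 *		.post_prepare = NULL,
 *		.pre_clip = du_foo_pre_clip,
 *		.clip = du_foo_clip,
 *		.post_clip = du_foo_post_clip,
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 */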