// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "ttm_object.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <linux/intel-iommu.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,	\
		 union drm_vmw_gb_surface_reference_ext_arg)
/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
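
/*
 * Illustrative expansion (editor's sketch, not in the original source):
 *
 *	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH)
 *
 * becomes
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_IOCTL_VMW_GET_PARAM, DRM_AUTH, vmw_getparam_ioctl},
 *
 * i.e. the table below is indexed by the driver-private ioctl number.
 */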

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};
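
/*
 * Illustrative userspace call (editor's sketch, not part of this file):
 * with libdrm, an entry such as DRM_IOCTL_VMW_GET_PARAM is typically
 * reached via the command-index helpers, e.g.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
 *
 * which resolves to the same DRM_COMMAND_BASE-relative ioctl number
 * used to index vmw_ioctls[] above.
 */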

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
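
/*
 * Usage note (illustrative): the parameters above can be set at load
 * time, e.g. "modprobe vmwgfx enable_fbdev=1 force_coherent=1", or
 * changed afterwards through /sys/module/vmwgfx/parameters/ since they
 * are registered with mode 0600.
 */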

static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
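
/*
 * Editor's summary (not in the original source): vmw_request_device
 * brings up the FIFO and fencing, creates the command buffer manager
 * when the device supports it, and then performs the late setup above.
 * It is called during driver load and again from vmw_pm_restore().
 */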
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * device.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops && dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32-bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err0;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->has_dx) {
		/*
		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
		 * support
		 */
		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
				  SVGA3D_DEVCAP_SM41);
			dev_priv->has_sm4_1 = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		}
	}

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
		 ? "yes." : "no.");
	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
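
/*
 * Editor's summary (not in the original source): vmw_master_check
 * decides, per ioctl, whether master locking is needed. It returns
 * NULL when the caller may proceed without it, a pointer to the
 * vmw_master whose TTM lock was taken on success, or an ERR_PTR on
 * failure.
 */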
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (drm_is_current_master(file_priv)) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}
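
/*
 * Common ioctl entry point: applies the extra permission and encoding
 * checks described above before handing off to the core DRM dispatcher
 * (drm_ioctl or drm_compat_ioctl).
 */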
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(!vmaster))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 *
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}
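
/*
 * Note (editor's addition): hibernation maps to .freeze/.thaw/.restore
 * below, while suspend-to-RAM uses .suspend/.resume, which mainly save
 * and restore PCI state via vmw_pci_suspend()/vmw_pci_resume().
 */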
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");