Commit | Line | Data |
---|---|---|
fb1d9738 JB |
1 | /************************************************************************** |
2 | * | |
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | |
4 | * All Rights Reserved. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the | |
8 | * "Software"), to deal in the Software without restriction, including | |
9 | * without limitation the rights to use, copy, modify, merge, publish, | |
10 | * distribute, sub license, and/or sell copies of the Software, and to | |
11 | * permit persons to whom the Software is furnished to do so, subject to | |
12 | * the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice (including the | |
15 | * next paragraph) shall be included in all copies or substantial portions | |
16 | * of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | **************************************************************************/ | |
e0cd3608 | 27 | #include <linux/module.h> |
fb1d9738 | 28 | |
760285e7 | 29 | #include <drm/drmP.h> |
fb1d9738 | 30 | #include "vmwgfx_drv.h" |
760285e7 DH |
31 | #include <drm/ttm/ttm_placement.h> |
32 | #include <drm/ttm/ttm_bo_driver.h> | |
33 | #include <drm/ttm/ttm_object.h> | |
34 | #include <drm/ttm/ttm_module.h> | |
d92d9851 | 35 | #include <linux/dma_remapping.h> |
fb1d9738 JB |
36 | |
37 | #define VMWGFX_DRIVER_NAME "vmwgfx" | |
38 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" | |
39 | #define VMWGFX_CHIP_SVGAII 0 | |
40 | #define VMW_FB_RESERVATION 0 | |
41 | ||
eb4f923b JB |
42 | #define VMW_MIN_INITIAL_WIDTH 800 |
43 | #define VMW_MIN_INITIAL_HEIGHT 600 | |
44 | ||
45 | ||
fb1d9738 JB |
46 | /** |
47 | * Fully encoded drm commands. Might move to vmw_drm.h | |
48 | */ | |
49 | ||
50 | #define DRM_IOCTL_VMW_GET_PARAM \ | |
51 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ | |
52 | struct drm_vmw_getparam_arg) | |
53 | #define DRM_IOCTL_VMW_ALLOC_DMABUF \ | |
54 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ | |
55 | union drm_vmw_alloc_dmabuf_arg) | |
56 | #define DRM_IOCTL_VMW_UNREF_DMABUF \ | |
57 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ | |
58 | struct drm_vmw_unref_dmabuf_arg) | |
59 | #define DRM_IOCTL_VMW_CURSOR_BYPASS \ | |
60 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ | |
61 | struct drm_vmw_cursor_bypass_arg) | |
62 | ||
63 | #define DRM_IOCTL_VMW_CONTROL_STREAM \ | |
64 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ | |
65 | struct drm_vmw_control_stream_arg) | |
66 | #define DRM_IOCTL_VMW_CLAIM_STREAM \ | |
67 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ | |
68 | struct drm_vmw_stream_arg) | |
69 | #define DRM_IOCTL_VMW_UNREF_STREAM \ | |
70 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ | |
71 | struct drm_vmw_stream_arg) | |
72 | ||
73 | #define DRM_IOCTL_VMW_CREATE_CONTEXT \ | |
74 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ | |
75 | struct drm_vmw_context_arg) | |
76 | #define DRM_IOCTL_VMW_UNREF_CONTEXT \ | |
77 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ | |
78 | struct drm_vmw_context_arg) | |
79 | #define DRM_IOCTL_VMW_CREATE_SURFACE \ | |
80 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ | |
81 | union drm_vmw_surface_create_arg) | |
82 | #define DRM_IOCTL_VMW_UNREF_SURFACE \ | |
83 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ | |
84 | struct drm_vmw_surface_arg) | |
85 | #define DRM_IOCTL_VMW_REF_SURFACE \ | |
86 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ | |
87 | union drm_vmw_surface_reference_arg) | |
88 | #define DRM_IOCTL_VMW_EXECBUF \ | |
89 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ | |
90 | struct drm_vmw_execbuf_arg) | |
ae2a1040 TH |
91 | #define DRM_IOCTL_VMW_GET_3D_CAP \ |
92 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ | |
93 | struct drm_vmw_get_3d_cap_arg) | |
fb1d9738 JB |
94 | #define DRM_IOCTL_VMW_FENCE_WAIT \ |
95 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ | |
96 | struct drm_vmw_fence_wait_arg) | |
ae2a1040 TH |
97 | #define DRM_IOCTL_VMW_FENCE_SIGNALED \ |
98 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ | |
99 | struct drm_vmw_fence_signaled_arg) | |
100 | #define DRM_IOCTL_VMW_FENCE_UNREF \ | |
101 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ | |
102 | struct drm_vmw_fence_arg) | |
57c5ee79 TH |
103 | #define DRM_IOCTL_VMW_FENCE_EVENT \ |
104 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ | |
105 | struct drm_vmw_fence_event_arg) | |
2fcd5a73 JB |
106 | #define DRM_IOCTL_VMW_PRESENT \ |
107 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ | |
108 | struct drm_vmw_present_arg) | |
109 | #define DRM_IOCTL_VMW_PRESENT_READBACK \ | |
110 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ | |
111 | struct drm_vmw_present_readback_arg) | |
cd2b89e7 TH |
112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | |
114 | struct drm_vmw_update_layout_arg) | |
c74c162f TH |
115 | #define DRM_IOCTL_VMW_CREATE_SHADER \ |
116 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ | |
117 | struct drm_vmw_shader_create_arg) | |
118 | #define DRM_IOCTL_VMW_UNREF_SHADER \ | |
119 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ | |
120 | struct drm_vmw_shader_arg) | |
a97e2192 TH |
121 | #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ |
122 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ | |
123 | union drm_vmw_gb_surface_create_arg) | |
124 | #define DRM_IOCTL_VMW_GB_SURFACE_REF \ | |
125 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ | |
126 | union drm_vmw_gb_surface_reference_arg) | |
1d7a5cbf TH |
127 | #define DRM_IOCTL_VMW_SYNCCPU \ |
128 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ | |
129 | struct drm_vmw_synccpu_arg) | |
fb1d9738 JB |
130 | |
131 | /** | |
132 | * The core DRM version of this macro doesn't account for | |
133 | * DRM_COMMAND_BASE. | |
134 | */ | |
135 | ||
136 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ | |
7e7392a6 | 137 | [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} |
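/*
 * Illustration (editorial, not part of the original source): with the macro
 * above, an entry such as
 *
 *   VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 *
 * expands to
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *           {DRM_IOCTL_VMW_GET_PARAM, flags, vmw_getparam_ioctl},
 *
 * so vmw_ioctls[] below is indexed by the driver-private ioctl number
 * (offset from DRM_COMMAND_BASE) rather than by the absolute ioctl number.
 */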
fb1d9738 JB |
138 | |
139 | /** | |
140 | * Ioctl definitions. | |
141 | */ | |
142 | ||
baa70943 | 143 | static const struct drm_ioctl_desc vmw_ioctls[] = { |
1b2f1489 | 144 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, |
03f80263 | 145 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 146 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
03f80263 | 147 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 148 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
03f80263 | 149 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 150 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, |
e1f78003 TH |
151 | vmw_kms_cursor_bypass_ioctl, |
152 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | |
fb1d9738 | 153 | |
1b2f1489 | 154 | VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, |
e1f78003 | 155 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
1b2f1489 | 156 | VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, |
e1f78003 | 157 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
1b2f1489 | 158 | VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, |
e1f78003 | 159 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
fb1d9738 | 160 | |
1b2f1489 | 161 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
03f80263 | 162 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 163 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
03f80263 | 164 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 165 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
03f80263 | 166 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 167 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
03f80263 | 168 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 169 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
03f80263 | 170 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1b2f1489 | 171 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
03f80263 | 172 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
ae2a1040 | 173 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, |
89dcbda6 | 174 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
ae2a1040 TH |
175 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, |
176 | vmw_fence_obj_signaled_ioctl, | |
89dcbda6 | 177 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
ae2a1040 | 178 | VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, |
03f80263 TH |
179 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
180 | VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, | |
181 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | |
f63f6a59 | 182 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, |
03f80263 | 183 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
2fcd5a73 JB |
184 | |
185 | /* These ioctls allow direct access to the framebuffers, so mark them master only */ | |
186 | VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, | |
187 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), | |
188 | VMW_IOCTL_DEF(VMW_PRESENT_READBACK, | |
189 | vmw_present_readback_ioctl, | |
190 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), | |
cd2b89e7 TH |
191 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
192 | vmw_kms_update_layout_ioctl, | |
193 | DRM_MASTER | DRM_UNLOCKED), | |
c74c162f TH |
194 | VMW_IOCTL_DEF(VMW_CREATE_SHADER, |
195 | vmw_shader_define_ioctl, | |
03f80263 | 196 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
c74c162f TH |
197 | VMW_IOCTL_DEF(VMW_UNREF_SHADER, |
198 | vmw_shader_destroy_ioctl, | |
03f80263 | 199 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
a97e2192 TH |
200 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, |
201 | vmw_gb_surface_define_ioctl, | |
03f80263 | 202 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
a97e2192 TH |
203 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, |
204 | vmw_gb_surface_reference_ioctl, | |
03f80263 | 205 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
1d7a5cbf TH |
206 | VMW_IOCTL_DEF(VMW_SYNCCPU, |
207 | vmw_user_dmabuf_synccpu_ioctl, | |
89dcbda6 | 208 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
fb1d9738 JB |
209 | }; |
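/*
 * Editorial note on the flags used above: DRM_AUTH requires an authenticated
 * client, DRM_MASTER restricts the ioctl to the current DRM master,
 * DRM_RENDER_ALLOW additionally exposes it on render nodes,
 * DRM_CONTROL_ALLOW exposes it on the control node, and DRM_UNLOCKED means
 * the ioctl is dispatched without the legacy global DRM lock.
 */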
210 | ||
211 | static struct pci_device_id vmw_pci_id_list[] = { | |
212 | {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, | |
213 | {0, 0, 0} | |
214 | }; | |
c4903429 | 215 | MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); |
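/*
 * Editorial note: PCI vendor 0x15ad is VMware and device 0x0405 is the
 * virtual SVGA II adapter; MODULE_DEVICE_TABLE() exports vmw_pci_id_list so
 * that udev/modprobe can autoload vmwgfx when that device is present.
 */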
fb1d9738 | 216 | |
5d2afab9 | 217 | static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); |
d92d9851 TH |
218 | static int vmw_force_iommu; |
219 | static int vmw_restrict_iommu; | |
220 | static int vmw_force_coherent; | |
0d00c488 | 221 | static int vmw_restrict_dma_mask; |
fb1d9738 JB |
222 | |
223 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | |
224 | static void vmw_master_init(struct vmw_master *); | |
d9f36a00 TH |
225 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
226 | void *ptr); | |
fb1d9738 | 227 | |
30c78bb8 TH |
228 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
229 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | |
d92d9851 TH |
230 | MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); |
231 | module_param_named(force_dma_api, vmw_force_iommu, int, 0600); | |
232 | MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); | |
233 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); | |
234 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); | |
235 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); | |
0d00c488 TH |
236 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); |
237 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); | |
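/*
 * Usage illustration (editorial): these parameters can be given at load time,
 * e.g. "modprobe vmwgfx enable_fbdev=1 restrict_iommu=1", or on the kernel
 * command line as "vmwgfx.force_coherent=1". With the 0600 permissions above
 * they also appear, root-writable, under /sys/module/vmwgfx/parameters/.
 */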
d92d9851 | 238 | |
30c78bb8 | 239 | |
fb1d9738 JB |
240 | static void vmw_print_capabilities(uint32_t capabilities) |
241 | { | |
242 | DRM_INFO("Capabilities:\n"); | |
243 | if (capabilities & SVGA_CAP_RECT_COPY) | |
244 | DRM_INFO(" Rect copy.\n"); | |
245 | if (capabilities & SVGA_CAP_CURSOR) | |
246 | DRM_INFO(" Cursor.\n"); | |
247 | if (capabilities & SVGA_CAP_CURSOR_BYPASS) | |
248 | DRM_INFO(" Cursor bypass.\n"); | |
249 | if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) | |
250 | DRM_INFO(" Cursor bypass 2.\n"); | |
251 | if (capabilities & SVGA_CAP_8BIT_EMULATION) | |
252 | DRM_INFO(" 8bit emulation.\n"); | |
253 | if (capabilities & SVGA_CAP_ALPHA_CURSOR) | |
254 | DRM_INFO(" Alpha cursor.\n"); | |
255 | if (capabilities & SVGA_CAP_3D) | |
256 | DRM_INFO(" 3D.\n"); | |
257 | if (capabilities & SVGA_CAP_EXTENDED_FIFO) | |
258 | DRM_INFO(" Extended Fifo.\n"); | |
259 | if (capabilities & SVGA_CAP_MULTIMON) | |
260 | DRM_INFO(" Multimon.\n"); | |
261 | if (capabilities & SVGA_CAP_PITCHLOCK) | |
262 | DRM_INFO(" Pitchlock.\n"); | |
263 | if (capabilities & SVGA_CAP_IRQMASK) | |
264 | DRM_INFO(" Irq mask.\n"); | |
265 | if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) | |
266 | DRM_INFO(" Display Topology.\n"); | |
267 | if (capabilities & SVGA_CAP_GMR) | |
268 | DRM_INFO(" GMR.\n"); | |
269 | if (capabilities & SVGA_CAP_TRACES) | |
270 | DRM_INFO(" Traces.\n"); | |
dcca2862 TH |
271 | if (capabilities & SVGA_CAP_GMR2) |
272 | DRM_INFO(" GMR2.\n"); | |
273 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) | |
274 | DRM_INFO(" Screen Object 2.\n"); | |
c1234db7 TH |
275 | if (capabilities & SVGA_CAP_COMMAND_BUFFERS) |
276 | DRM_INFO(" Command Buffers.\n"); | |
277 | if (capabilities & SVGA_CAP_CMD_BUFFERS_2) | |
278 | DRM_INFO(" Command Buffers 2.\n"); | |
279 | if (capabilities & SVGA_CAP_GBOBJECTS) | |
280 | DRM_INFO(" Guest Backed Resources.\n"); | |
3eab3d9e TH |
281 | if (capabilities & SVGA_CAP_CMD_BUFFERS_3) |
282 | DRM_INFO(" Command Buffers 3.\n"); | |
fb1d9738 JB |
283 | } |
284 | ||
e2fa3a76 | 285 | /** |
4b9e45e6 | 286 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
e2fa3a76 | 287 | * |
4b9e45e6 | 288 | * @dev_priv: A device private structure. |
e2fa3a76 | 289 | * |
4b9e45e6 TH |
290 | * This function creates a small buffer object that holds the query |
291 | * result for dummy queries emitted as query barriers. | |
292 | * The function will then map the first page and initialize a pending | |
293 | * occlusion query result structure. Finally, it will unmap the buffer. | |
294 | * No interruptible waits are done within this function. | |
e2fa3a76 | 295 | * |
4b9e45e6 | 296 | * Returns an error if bo creation or initialization fails. |
e2fa3a76 | 297 | */ |
4b9e45e6 | 298 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
e2fa3a76 | 299 | { |
4b9e45e6 | 300 | int ret; |
459d0fa7 | 301 | struct vmw_dma_buffer *vbo; |
e2fa3a76 TH |
302 | struct ttm_bo_kmap_obj map; |
303 | volatile SVGA3dQueryResult *result; | |
304 | bool dummy; | |
e2fa3a76 | 305 | |
4b9e45e6 | 306 | /* |
459d0fa7 | 307 | * Create the vbo as pinned, so that a tryreserve will |
4b9e45e6 TH |
308 | * immediately succeed. This is because we're the only |
309 | * user of the bo currently. | |
310 | */ | |
459d0fa7 TH |
311 | vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); |
312 | if (!vbo) | |
313 | return -ENOMEM; | |
4b9e45e6 | 314 | |
459d0fa7 TH |
315 | ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE, |
316 | &vmw_sys_ne_placement, false, | |
317 | &vmw_dmabuf_bo_free); | |
e2fa3a76 | 318 | if (unlikely(ret != 0)) |
4b9e45e6 TH |
319 | return ret; |
320 | ||
459d0fa7 | 321 | ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL); |
4b9e45e6 | 322 | BUG_ON(ret != 0); |
459d0fa7 | 323 | vmw_bo_pin_reserved(vbo, true); |
e2fa3a76 | 324 | |
459d0fa7 | 325 | ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); |
e2fa3a76 TH |
326 | if (likely(ret == 0)) { |
327 | result = ttm_kmap_obj_virtual(&map, &dummy); | |
328 | result->totalSize = sizeof(*result); | |
329 | result->state = SVGA3D_QUERYSTATE_PENDING; | |
330 | result->result32 = 0xff; | |
331 | ttm_bo_kunmap(&map); | |
4b9e45e6 | 332 | } |
459d0fa7 TH |
333 | vmw_bo_pin_reserved(vbo, false); |
334 | ttm_bo_unreserve(&vbo->base); | |
e2fa3a76 | 335 | |
4b9e45e6 TH |
336 | if (unlikely(ret != 0)) { |
337 | DRM_ERROR("Dummy query buffer map failed.\n"); | |
459d0fa7 | 338 | vmw_dmabuf_unreference(&vbo); |
4b9e45e6 | 339 | } else |
459d0fa7 | 340 | dev_priv->dummy_query_bo = vbo; |
e2fa3a76 | 341 | |
4b9e45e6 | 342 | return ret; |
e2fa3a76 TH |
343 | } |
344 | ||
153b3d5b TH |
345 | /** |
346 | * vmw_request_device_late - Perform late device setup | |
347 | * | |
348 | * @dev_priv: Pointer to device private. | |
349 | * | |
350 | * This function performs setup of otables and enables large command | |
351 | * buffer submission. These tasks are split out to a separate function | |
352 | * because it reverts vmw_release_device_early and is intended to be used | |
353 | * by an error path in the hibernation code. | |
354 | */ | |
355 | static int vmw_request_device_late(struct vmw_private *dev_priv) | |
fb1d9738 JB |
356 | { |
357 | int ret; | |
358 | ||
3530bdc3 TH |
359 | if (dev_priv->has_mob) { |
360 | ret = vmw_otables_setup(dev_priv); | |
361 | if (unlikely(ret != 0)) { | |
362 | DRM_ERROR("Unable to initialize " | |
363 | "guest Memory OBjects.\n"); | |
153b3d5b | 364 | return ret; |
3530bdc3 TH |
365 | } |
366 | } | |
153b3d5b | 367 | |
3eab3d9e TH |
368 | if (dev_priv->cman) { |
369 | ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, | |
370 | 256*4096, 2*4096); | |
371 | if (ret) { | |
372 | struct vmw_cmdbuf_man *man = dev_priv->cman; | |
373 | ||
374 | dev_priv->cman = NULL; | |
375 | vmw_cmdbuf_man_destroy(man); | |
376 | } | |
377 | } | |
378 | ||
153b3d5b TH |
379 | return 0; |
380 | } | |
381 | ||
382 | static int vmw_request_device(struct vmw_private *dev_priv) | |
383 | { | |
384 | int ret; | |
385 | ||
386 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); | |
387 | if (unlikely(ret != 0)) { | |
388 | DRM_ERROR("Unable to initialize FIFO.\n"); | |
389 | return ret; | |
390 | } | |
391 | vmw_fence_fifo_up(dev_priv->fman); | |
3eab3d9e TH |
392 | dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); |
393 | if (IS_ERR(dev_priv->cman)) | |
394 | dev_priv->cman = NULL; | |
153b3d5b TH |
395 | |
396 | ret = vmw_request_device_late(dev_priv); | |
397 | if (ret) | |
398 | goto out_no_mob; | |
399 | ||
e2fa3a76 TH |
400 | ret = vmw_dummy_query_bo_create(dev_priv); |
401 | if (unlikely(ret != 0)) | |
402 | goto out_no_query_bo; | |
fb1d9738 JB |
403 | |
404 | return 0; | |
e2fa3a76 TH |
405 | |
406 | out_no_query_bo: | |
3eab3d9e TH |
407 | if (dev_priv->cman) |
408 | vmw_cmdbuf_remove_pool(dev_priv->cman); | |
153b3d5b TH |
409 | if (dev_priv->has_mob) { |
410 | (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); | |
3530bdc3 | 411 | vmw_otables_takedown(dev_priv); |
153b3d5b | 412 | } |
3eab3d9e TH |
413 | if (dev_priv->cman) |
414 | vmw_cmdbuf_man_destroy(dev_priv->cman); | |
3530bdc3 | 415 | out_no_mob: |
e2fa3a76 TH |
416 | vmw_fence_fifo_down(dev_priv->fman); |
417 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | |
418 | return ret; | |
fb1d9738 JB |
419 | } |
420 | ||
153b3d5b TH |
421 | /** |
422 | * vmw_release_device_early - Early part of fifo takedown. | |
423 | * | |
424 | * @dev_priv: Pointer to device private struct. | |
425 | * | |
426 | * This is the first part of command submission takedown, to be called before | |
427 | * buffer management is taken down. | |
428 | */ | |
429 | static void vmw_release_device_early(struct vmw_private *dev_priv) | |
fb1d9738 | 430 | { |
e2fa3a76 TH |
431 | /* |
432 | * Previous destructions should've released | |
433 | * the pinned bo. | |
434 | */ | |
435 | ||
436 | BUG_ON(dev_priv->pinned_bo != NULL); | |
437 | ||
459d0fa7 | 438 | vmw_dmabuf_unreference(&dev_priv->dummy_query_bo); |
3eab3d9e TH |
439 | if (dev_priv->cman) |
440 | vmw_cmdbuf_remove_pool(dev_priv->cman); | |
441 | ||
153b3d5b TH |
442 | if (dev_priv->has_mob) { |
443 | ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); | |
3530bdc3 | 444 | vmw_otables_takedown(dev_priv); |
30c78bb8 | 445 | } |
fb1d9738 JB |
446 | } |
447 | ||
05730b32 | 448 | /** |
153b3d5b TH |
449 | * vmw_release_device_late - Late part of fifo takedown. |
450 | * | |
451 | * @dev_priv: Pointer to device private struct. | |
452 | * | |
453 | * This is the last part of the command submission takedown, to be called when | |
454 | * command submission is no longer needed. It may wait on pending fences. | |
05730b32 | 455 | */ |
153b3d5b | 456 | static void vmw_release_device_late(struct vmw_private *dev_priv) |
30c78bb8 | 457 | { |
153b3d5b | 458 | vmw_fence_fifo_down(dev_priv->fman); |
3eab3d9e TH |
459 | if (dev_priv->cman) |
460 | vmw_cmdbuf_man_destroy(dev_priv->cman); | |
461 | ||
153b3d5b | 462 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
30c78bb8 TH |
463 | } |
464 | ||
eb4f923b JB |
465 | /** |
466 | * Sets the initial_[width|height] fields on the given vmw_private. | |
467 | * | |
468 | * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then | |
67d4a87b TH |
469 | * clamping the value to fb_max_[width|height] fields and the |
470 | * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. | |
471 | * If the values appear to be invalid, set them to | |
eb4f923b JB |
472 | * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. |
473 | */ | |
474 | static void vmw_get_initial_size(struct vmw_private *dev_priv) | |
475 | { | |
476 | uint32_t width; | |
477 | uint32_t height; | |
478 | ||
479 | width = vmw_read(dev_priv, SVGA_REG_WIDTH); | |
480 | height = vmw_read(dev_priv, SVGA_REG_HEIGHT); | |
481 | ||
482 | width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); | |
eb4f923b | 483 | height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); |
67d4a87b TH |
484 | |
485 | if (width > dev_priv->fb_max_width || | |
486 | height > dev_priv->fb_max_height) { | |
487 | ||
488 | /* | |
489 | * This is a host error and shouldn't occur. | |
490 | */ | |
491 | ||
492 | width = VMW_MIN_INITIAL_WIDTH; | |
493 | height = VMW_MIN_INITIAL_HEIGHT; | |
494 | } | |
eb4f923b JB |
495 | |
496 | dev_priv->initial_width = width; | |
497 | dev_priv->initial_height = height; | |
498 | } | |
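/*
 * Worked example (editorial): a host-reported 640x480 is raised by the
 * max_t() clamps to the 800x600 minimum, while a reported size exceeding
 * fb_max_width/fb_max_height is treated as a host error and replaced by
 * 800x600 as well.
 */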
499 | ||
d92d9851 TH |
500 | /** |
501 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this | |
502 | * system. | |
503 | * | |
504 | * @dev_priv: Pointer to a struct vmw_private | |
505 | * | |
506 | * This function tries to determine the IOMMU setup and what actions | |
507 | * need to be taken by the driver to make system pages visible to the | |
508 | * device. | |
509 | * If this function decides that DMA is not possible, it returns -EINVAL. | |
510 | * The driver may then try to disable features of the device that require | |
511 | * DMA. | |
512 | */ | |
513 | static int vmw_dma_select_mode(struct vmw_private *dev_priv) | |
514 | { | |
d92d9851 TH |
515 | static const char *names[vmw_dma_map_max] = { |
516 | [vmw_dma_phys] = "Using physical TTM page addresses.", | |
517 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", | |
518 | [vmw_dma_map_populate] = "Keeping DMA mappings.", | |
519 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; | |
e14cd953 TH |
520 | #ifdef CONFIG_X86 |
521 | const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); | |
d92d9851 TH |
522 | |
523 | #ifdef CONFIG_INTEL_IOMMU | |
524 | if (intel_iommu_enabled) { | |
525 | dev_priv->map_mode = vmw_dma_map_populate; | |
526 | goto out_fixup; | |
527 | } | |
528 | #endif | |
529 | ||
530 | if (!(vmw_force_iommu || vmw_force_coherent)) { | |
531 | dev_priv->map_mode = vmw_dma_phys; | |
532 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | |
533 | return 0; | |
534 | } | |
535 | ||
536 | dev_priv->map_mode = vmw_dma_map_populate; | |
537 | ||
538 | if (dma_ops->sync_single_for_cpu) | |
539 | dev_priv->map_mode = vmw_dma_alloc_coherent; | |
540 | #ifdef CONFIG_SWIOTLB | |
541 | if (swiotlb_nr_tbl() == 0) | |
542 | dev_priv->map_mode = vmw_dma_map_populate; | |
543 | #endif | |
544 | ||
21136946 | 545 | #ifdef CONFIG_INTEL_IOMMU |
d92d9851 | 546 | out_fixup: |
21136946 | 547 | #endif |
d92d9851 TH |
548 | if (dev_priv->map_mode == vmw_dma_map_populate && |
549 | vmw_restrict_iommu) | |
550 | dev_priv->map_mode = vmw_dma_map_bind; | |
551 | ||
552 | if (vmw_force_coherent) | |
553 | dev_priv->map_mode = vmw_dma_alloc_coherent; | |
554 | ||
555 | #if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) | |
556 | /* | |
557 | * No coherent page pool | |
558 | */ | |
559 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) | |
560 | return -EINVAL; | |
561 | #endif | |
562 | ||
e14cd953 TH |
563 | #else /* CONFIG_X86 */ |
564 | dev_priv->map_mode = vmw_dma_map_populate; | |
565 | #endif /* CONFIG_X86 */ | |
566 | ||
d92d9851 TH |
567 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); |
568 | ||
569 | return 0; | |
570 | } | |
571 | ||
0d00c488 TH |
572 | /** |
573 | * vmw_dma_masks - set required page- and dma masks | |
574 | * | |
575 | * @dev_priv: Pointer to a struct vmw_private | |
576 | * | |
577 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that | |
578 | * restriction also for 64-bit systems. | |
579 | */ | |
580 | #ifdef CONFIG_INTEL_IOMMU | |
581 | static int vmw_dma_masks(struct vmw_private *dev_priv) | |
582 | { | |
583 | struct drm_device *dev = dev_priv->dev; | |
584 | ||
585 | if (intel_iommu_enabled && | |
586 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { | |
587 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); | |
588 | return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); | |
589 | } | |
590 | return 0; | |
591 | } | |
592 | #else | |
593 | static int vmw_dma_masks(struct vmw_private *dev_priv) | |
594 | { | |
595 | return 0; | |
596 | } | |
597 | #endif | |
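/*
 * Editorial note (assumption, not from the original source): DMA_BIT_MASK(44)
 * equals (1ULL << 44) - 1, i.e. addresses below 16 TiB; the 44 bits
 * presumably correspond to a 32-bit page frame number plus the 12-bit page
 * offset handled by the device.
 */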
598 | ||
fb1d9738 JB |
599 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
600 | { | |
601 | struct vmw_private *dev_priv; | |
602 | int ret; | |
c188660f | 603 | uint32_t svga_id; |
c0951b79 | 604 | enum vmw_res_type i; |
d92d9851 | 605 | bool refuse_dma = false; |
fb1d9738 JB |
606 | |
607 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | |
608 | if (unlikely(dev_priv == NULL)) { | |
609 | DRM_ERROR("Failed allocating a device private struct.\n"); | |
610 | return -ENOMEM; | |
611 | } | |
fb1d9738 | 612 | |
466e69b8 DA |
613 | pci_set_master(dev->pdev); |
614 | ||
fb1d9738 JB |
615 | dev_priv->dev = dev; |
616 | dev_priv->vmw_chipset = chipset; | |
6bcd8d3c | 617 | dev_priv->last_read_seqno = (uint32_t) -100; |
fb1d9738 | 618 | mutex_init(&dev_priv->cmdbuf_mutex); |
30c78bb8 | 619 | mutex_init(&dev_priv->release_mutex); |
173fb7d4 | 620 | mutex_init(&dev_priv->binding_mutex); |
fb1d9738 | 621 | rwlock_init(&dev_priv->resource_lock); |
294adf7d | 622 | ttm_lock_init(&dev_priv->reservation_sem); |
496eb6fd TH |
623 | spin_lock_init(&dev_priv->hw_lock); |
624 | spin_lock_init(&dev_priv->waiter_lock); | |
625 | spin_lock_init(&dev_priv->cap_lock); | |
153b3d5b | 626 | spin_lock_init(&dev_priv->svga_lock); |
c0951b79 TH |
627 | |
628 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | |
629 | idr_init(&dev_priv->res_idr[i]); | |
630 | INIT_LIST_HEAD(&dev_priv->res_lru[i]); | |
631 | } | |
632 | ||
fb1d9738 JB |
633 | mutex_init(&dev_priv->init_mutex); |
634 | init_waitqueue_head(&dev_priv->fence_queue); | |
635 | init_waitqueue_head(&dev_priv->fifo_queue); | |
4f73a96b | 636 | dev_priv->fence_queue_waiters = 0; |
fb1d9738 | 637 | atomic_set(&dev_priv->fifo_queue_waiters, 0); |
c0951b79 | 638 | |
5bb39e81 | 639 | dev_priv->used_memory_size = 0; |
fb1d9738 JB |
640 | |
641 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); | |
642 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | |
643 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | |
644 | ||
30c78bb8 TH |
645 | dev_priv->enable_fb = enable_fbdev; |
646 | ||
c188660f PH |
647 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
648 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); | |
649 | if (svga_id != SVGA_ID_2) { | |
650 | ret = -ENOSYS; | |
49625904 | 651 | DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); |
c188660f PH |
652 | goto out_err0; |
653 | } | |
654 | ||
fb1d9738 | 655 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); |
d92d9851 TH |
656 | ret = vmw_dma_select_mode(dev_priv); |
657 | if (unlikely(ret != 0)) { | |
658 | DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); | |
659 | refuse_dma = true; | |
660 | } | |
fb1d9738 | 661 | |
5bb39e81 TH |
662 | dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); |
663 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); | |
664 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); | |
665 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); | |
eb4f923b JB |
666 | |
667 | vmw_get_initial_size(dev_priv); | |
668 | ||
0d00c488 | 669 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
fb1d9738 JB |
670 | dev_priv->max_gmr_ids = |
671 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); | |
fb17f189 TH |
672 | dev_priv->max_gmr_pages = |
673 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); | |
674 | dev_priv->memory_size = | |
675 | vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); | |
5bb39e81 TH |
676 | dev_priv->memory_size -= dev_priv->vram_size; |
677 | } else { | |
678 | /* | |
679 | * An arbitrary limit of 512MiB on surface | |
680 | * memory. But all HWV8 hardware supports GMR2. | |
681 | */ | |
682 | dev_priv->memory_size = 512*1024*1024; | |
fb17f189 | 683 | } |
6da768aa | 684 | dev_priv->max_mob_pages = 0; |
857aea1c | 685 | dev_priv->max_mob_size = 0; |
6da768aa TH |
686 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
687 | uint64_t mem_size = | |
688 | vmw_read(dev_priv, | |
689 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); | |
690 | ||
691 | dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; | |
afb0e50f TH |
692 | dev_priv->prim_bb_mem = |
693 | vmw_read(dev_priv, | |
694 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); | |
857aea1c CL |
695 | dev_priv->max_mob_size = |
696 | vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); | |
35c05125 SY |
697 | dev_priv->stdu_max_width = |
698 | vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); | |
699 | dev_priv->stdu_max_height = | |
700 | vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); | |
701 | ||
702 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, | |
703 | SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); | |
704 | dev_priv->texture_max_width = vmw_read(dev_priv, | |
705 | SVGA_REG_DEV_CAP); | |
706 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, | |
707 | SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); | |
708 | dev_priv->texture_max_height = vmw_read(dev_priv, | |
709 | SVGA_REG_DEV_CAP); | |
afb0e50f TH |
710 | } else |
711 | dev_priv->prim_bb_mem = dev_priv->vram_size; | |
35c05125 SY |
712 | |
713 | vmw_print_capabilities(dev_priv->capabilities); | |
fb1d9738 | 714 | |
0d00c488 | 715 | ret = vmw_dma_masks(dev_priv); |
496eb6fd | 716 | if (unlikely(ret != 0)) |
0d00c488 TH |
717 | goto out_err0; |
718 | ||
0d00c488 | 719 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
fb1d9738 JB |
720 | DRM_INFO("Max GMR ids is %u\n", |
721 | (unsigned)dev_priv->max_gmr_ids); | |
fb17f189 TH |
722 | DRM_INFO("Max number of GMR pages is %u\n", |
723 | (unsigned)dev_priv->max_gmr_pages); | |
5bb39e81 TH |
724 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
725 | (unsigned)dev_priv->memory_size / 1024); | |
fb17f189 | 726 | } |
bc2d6508 TH |
727 | DRM_INFO("Maximum display memory size is %u kiB\n", |
728 | dev_priv->prim_bb_mem / 1024); | |
fb1d9738 JB |
729 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
730 | dev_priv->vram_start, dev_priv->vram_size / 1024); | |
731 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | |
732 | dev_priv->mmio_start, dev_priv->mmio_size / 1024); | |
733 | ||
734 | ret = vmw_ttm_global_init(dev_priv); | |
735 | if (unlikely(ret != 0)) | |
736 | goto out_err0; | |
737 | ||
738 | ||
739 | vmw_master_init(&dev_priv->fbdev_master); | |
740 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | |
741 | dev_priv->active_master = &dev_priv->fbdev_master; | |
742 | ||
a2c06ee2 | 743 | |
247d36d7 AL |
744 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
745 | dev_priv->mmio_size); | |
fb1d9738 JB |
746 | |
747 | dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, | |
748 | dev_priv->mmio_size); | |
749 | ||
750 | if (unlikely(dev_priv->mmio_virt == NULL)) { | |
751 | ret = -ENOMEM; | |
752 | DRM_ERROR("Failed mapping MMIO.\n"); | |
753 | goto out_err3; | |
754 | } | |
755 | ||
d7e1958d JB |
756 | /* Need mmio memory to check for fifo pitchlock cap. */ |
757 | if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && | |
758 | !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && | |
759 | !vmw_fifo_have_pitchlock(dev_priv)) { | |
760 | ret = -ENOSYS; | |
761 | DRM_ERROR("Hardware has no pitchlock\n"); | |
762 | goto out_err4; | |
763 | } | |
764 | ||
fb1d9738 | 765 | dev_priv->tdev = ttm_object_device_init |
69977ff5 | 766 | (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); |
fb1d9738 JB |
767 | |
768 | if (unlikely(dev_priv->tdev == NULL)) { | |
769 | DRM_ERROR("Unable to initialize TTM object management.\n"); | |
770 | ret = -ENOMEM; | |
771 | goto out_err4; | |
772 | } | |
773 | ||
774 | dev->dev_private = dev_priv; | |
775 | ||
fb1d9738 JB |
776 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
777 | dev_priv->stealth = (ret != 0); | |
778 | if (dev_priv->stealth) { | |
779 | /** | |
780 | * Request at least the mmio PCI resource. | |
781 | */ | |
782 | ||
783 | DRM_INFO("It appears like vesafb is loaded. " | |
f2d12b8e | 784 | "Ignore above error if any.\n"); |
fb1d9738 JB |
785 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); |
786 | if (unlikely(ret != 0)) { | |
787 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); | |
788 | goto out_no_device; | |
789 | } | |
fb1d9738 | 790 | } |
ae2a1040 | 791 | |
506ff75c | 792 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { |
bb0f1b5c | 793 | ret = drm_irq_install(dev, dev->pdev->irq); |
506ff75c TH |
794 | if (ret != 0) { |
795 | DRM_ERROR("Failed installing irq: %d\n", ret); | |
796 | goto out_no_irq; | |
797 | } | |
798 | } | |
799 | ||
ae2a1040 | 800 | dev_priv->fman = vmw_fence_manager_init(dev_priv); |
14bbf20c WY |
801 | if (unlikely(dev_priv->fman == NULL)) { |
802 | ret = -ENOMEM; | |
ae2a1040 | 803 | goto out_no_fman; |
14bbf20c | 804 | } |
56d1c78d | 805 | |
153b3d5b TH |
806 | ret = ttm_bo_device_init(&dev_priv->bdev, |
807 | dev_priv->bo_global_ref.ref.object, | |
808 | &vmw_bo_driver, | |
809 | dev->anon_inode->i_mapping, | |
810 | VMWGFX_FILE_PAGE_OFFSET, | |
811 | false); | |
812 | if (unlikely(ret != 0)) { | |
813 | DRM_ERROR("Failed initializing TTM buffer object driver.\n"); | |
814 | goto out_no_bdev; | |
815 | } | |
3458390b | 816 | |
153b3d5b TH |
817 | /* |
818 | * Enable VRAM, but initially don't use it until SVGA is enabled and | |
819 | * unhidden. | |
820 | */ | |
3458390b TH |
821 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, |
822 | (dev_priv->vram_size >> PAGE_SHIFT)); | |
823 | if (unlikely(ret != 0)) { | |
824 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | |
825 | goto out_no_vram; | |
826 | } | |
153b3d5b | 827 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; |
3458390b TH |
828 | |
829 | dev_priv->has_gmr = true; | |
830 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | |
831 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | |
832 | VMW_PL_GMR) != 0) { | |
833 | DRM_INFO("No GMR memory available. " | |
834 | "Graphics memory resources are very limited.\n"); | |
835 | dev_priv->has_gmr = false; | |
836 | } | |
837 | ||
838 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | |
839 | dev_priv->has_mob = true; | |
840 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | |
841 | VMW_PL_MOB) != 0) { | |
842 | DRM_INFO("No MOB memory available. " | |
843 | "3D will be disabled.\n"); | |
844 | dev_priv->has_mob = false; | |
845 | } | |
846 | } | |
847 | ||
7a1c2f6c TH |
848 | ret = vmw_kms_init(dev_priv); |
849 | if (unlikely(ret != 0)) | |
850 | goto out_no_kms; | |
f2d12b8e | 851 | vmw_overlay_init(dev_priv); |
56d1c78d | 852 | |
153b3d5b TH |
853 | ret = vmw_request_device(dev_priv); |
854 | if (ret) | |
855 | goto out_no_fifo; | |
856 | ||
30c78bb8 | 857 | if (dev_priv->enable_fb) { |
153b3d5b TH |
858 | vmw_fifo_resource_inc(dev_priv); |
859 | vmw_svga_enable(dev_priv); | |
30c78bb8 | 860 | vmw_fb_init(dev_priv); |
7a1c2f6c TH |
861 | } |
862 | ||
d9f36a00 TH |
863 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
864 | register_pm_notifier(&dev_priv->pm_nb); | |
865 | ||
fb1d9738 JB |
866 | return 0; |
867 | ||
506ff75c | 868 | out_no_fifo: |
56d1c78d JB |
869 | vmw_overlay_close(dev_priv); |
870 | vmw_kms_close(dev_priv); | |
871 | out_no_kms: | |
3458390b TH |
872 | if (dev_priv->has_mob) |
873 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | |
874 | if (dev_priv->has_gmr) | |
875 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | |
876 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | |
877 | out_no_vram: | |
153b3d5b TH |
878 | (void)ttm_bo_device_release(&dev_priv->bdev); |
879 | out_no_bdev: | |
ae2a1040 TH |
880 | vmw_fence_manager_takedown(dev_priv->fman); |
881 | out_no_fman: | |
506ff75c TH |
882 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
883 | drm_irq_uninstall(dev_priv->dev); | |
884 | out_no_irq: | |
30c78bb8 TH |
885 | if (dev_priv->stealth) |
886 | pci_release_region(dev->pdev, 2); | |
887 | else | |
888 | pci_release_regions(dev->pdev); | |
fb1d9738 | 889 | out_no_device: |
fb1d9738 JB |
890 | ttm_object_device_release(&dev_priv->tdev); |
891 | out_err4: | |
892 | iounmap(dev_priv->mmio_virt); | |
893 | out_err3: | |
247d36d7 | 894 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
fb1d9738 JB |
895 | vmw_ttm_global_release(dev_priv); |
896 | out_err0: | |
c0951b79 TH |
897 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
898 | idr_destroy(&dev_priv->res_idr[i]); | |
899 | ||
fb1d9738 JB |
900 | kfree(dev_priv); |
901 | return ret; | |
902 | } | |
903 | ||
904 | static int vmw_driver_unload(struct drm_device *dev) | |
905 | { | |
906 | struct vmw_private *dev_priv = vmw_priv(dev); | |
c0951b79 | 907 | enum vmw_res_type i; |
fb1d9738 | 908 | |
d9f36a00 TH |
909 | unregister_pm_notifier(&dev_priv->pm_nb); |
910 | ||
c0951b79 TH |
911 | if (dev_priv->ctx.res_ht_initialized) |
912 | drm_ht_remove(&dev_priv->ctx.res_ht); | |
a3a1a667 | 913 | vfree(dev_priv->ctx.cmd_bounce); |
30c78bb8 TH |
914 | if (dev_priv->enable_fb) { |
915 | vmw_fb_close(dev_priv); | |
153b3d5b TH |
916 | vmw_fifo_resource_dec(dev_priv); |
917 | vmw_svga_disable(dev_priv); | |
30c78bb8 | 918 | } |
153b3d5b | 919 | |
f2d12b8e TH |
920 | vmw_kms_close(dev_priv); |
921 | vmw_overlay_close(dev_priv); | |
3458390b | 922 | |
3458390b TH |
923 | if (dev_priv->has_gmr) |
924 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | |
925 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | |
926 | ||
153b3d5b TH |
927 | vmw_release_device_early(dev_priv); |
928 | if (dev_priv->has_mob) | |
929 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | |
930 | (void) ttm_bo_device_release(&dev_priv->bdev); | |
931 | vmw_release_device_late(dev_priv); | |
ae2a1040 | 932 | vmw_fence_manager_takedown(dev_priv->fman); |
506ff75c TH |
933 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
934 | drm_irq_uninstall(dev_priv->dev); | |
f2d12b8e | 935 | if (dev_priv->stealth) |
fb1d9738 | 936 | pci_release_region(dev->pdev, 2); |
f2d12b8e TH |
937 | else |
938 | pci_release_regions(dev->pdev); | |
939 | ||
fb1d9738 JB |
940 | ttm_object_device_release(&dev_priv->tdev); |
941 | iounmap(dev_priv->mmio_virt); | |
247d36d7 | 942 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
fb1d9738 JB |
943 | (void)ttm_bo_device_release(&dev_priv->bdev); |
944 | vmw_ttm_global_release(dev_priv); | |
c0951b79 TH |
945 | |
946 | for (i = vmw_res_context; i < vmw_res_max; ++i) | |
947 | idr_destroy(&dev_priv->res_idr[i]); | |
fb1d9738 JB |
948 | |
949 | kfree(dev_priv); | |
950 | ||
951 | return 0; | |
952 | } | |
953 | ||
6b82ef50 TH |
954 | static void vmw_preclose(struct drm_device *dev, |
955 | struct drm_file *file_priv) | |
956 | { | |
957 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | |
958 | struct vmw_private *dev_priv = vmw_priv(dev); | |
959 | ||
960 | vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events); | |
961 | } | |
962 | ||
fb1d9738 JB |
963 | static void vmw_postclose(struct drm_device *dev, |
964 | struct drm_file *file_priv) | |
965 | { | |
966 | struct vmw_fpriv *vmw_fp; | |
967 | ||
968 | vmw_fp = vmw_fpriv(file_priv); | |
c4249855 TH |
969 | |
970 | if (vmw_fp->locked_master) { | |
971 | struct vmw_master *vmaster = | |
972 | vmw_master(vmw_fp->locked_master); | |
973 | ||
974 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | |
975 | ttm_vt_unlock(&vmaster->lock); | |
fb1d9738 | 976 | drm_master_put(&vmw_fp->locked_master); |
c4249855 TH |
977 | } |
978 | ||
979 | ttm_object_file_release(&vmw_fp->tfile); | |
fb1d9738 JB |
980 | kfree(vmw_fp); |
981 | } | |
982 | ||
983 | static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |
984 | { | |
985 | struct vmw_private *dev_priv = vmw_priv(dev); | |
986 | struct vmw_fpriv *vmw_fp; | |
987 | int ret = -ENOMEM; | |
988 | ||
989 | vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); | |
990 | if (unlikely(vmw_fp == NULL)) | |
991 | return ret; | |
992 | ||
6b82ef50 | 993 | INIT_LIST_HEAD(&vmw_fp->fence_events); |
fb1d9738 JB |
994 | vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); |
995 | if (unlikely(vmw_fp->tfile == NULL)) | |
996 | goto out_no_tfile; | |
997 | ||
998 | file_priv->driver_priv = vmw_fp; | |
fb1d9738 JB |
999 | |
1000 | return 0; | |
1001 | ||
1002 | out_no_tfile: | |
1003 | kfree(vmw_fp); | |
1004 | return ret; | |
1005 | } | |
1006 | ||
64190bde TH |
1007 | static struct vmw_master *vmw_master_check(struct drm_device *dev, |
1008 | struct drm_file *file_priv, | |
1009 | unsigned int flags) | |
1010 | { | |
1011 | int ret; | |
1012 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | |
1013 | struct vmw_master *vmaster; | |
1014 | ||
1015 | if (file_priv->minor->type != DRM_MINOR_LEGACY || | |
1016 | !(flags & DRM_AUTH)) | |
1017 | return NULL; | |
1018 | ||
1019 | ret = mutex_lock_interruptible(&dev->master_mutex); | |
1020 | if (unlikely(ret != 0)) | |
1021 | return ERR_PTR(-ERESTARTSYS); | |
1022 | ||
7963e9db | 1023 | if (file_priv->is_master) { |
64190bde TH |
1024 | mutex_unlock(&dev->master_mutex); |
1025 | return NULL; | |
1026 | } | |
1027 | ||
1028 | /* | |
1029 | * Check if we were previously master, but now dropped. | |
1030 | */ | |
1031 | if (vmw_fp->locked_master) { | |
1032 | mutex_unlock(&dev->master_mutex); | |
1033 | DRM_ERROR("Dropped master trying to access ioctl that " | |
1034 | "requires authentication.\n"); | |
1035 | return ERR_PTR(-EACCES); | |
1036 | } | |
1037 | mutex_unlock(&dev->master_mutex); | |
1038 | ||
1039 | /* | |
1040 | * Taking the drm_global_mutex after the TTM lock might deadlock | |
1041 | */ | |
1042 | if (!(flags & DRM_UNLOCKED)) { | |
1043 | DRM_ERROR("Refusing locked ioctl access.\n"); | |
1044 | return ERR_PTR(-EDEADLK); | |
1045 | } | |
1046 | ||
1047 | /* | |
1048 | * Take the TTM lock. Possibly sleep waiting for the authenticating | |
1049 | * master to become master again, or for a SIGTERM if the | |
1050 | * authenticating master exits. | |
1051 | */ | |
1052 | vmaster = vmw_master(file_priv->master); | |
1053 | ret = ttm_read_lock(&vmaster->lock, true); | |
1054 | if (unlikely(ret != 0)) | |
1055 | vmaster = ERR_PTR(ret); | |
1056 | ||
1057 | return vmaster; | |
1058 | } | |
1059 | ||
1060 | static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | |
1061 | unsigned long arg, | |
1062 | long (*ioctl_func)(struct file *, unsigned int, | |
1063 | unsigned long)) | |
fb1d9738 JB |
1064 | { |
1065 | struct drm_file *file_priv = filp->private_data; | |
1066 | struct drm_device *dev = file_priv->minor->dev; | |
1067 | unsigned int nr = DRM_IOCTL_NR(cmd); | |
64190bde TH |
1068 | struct vmw_master *vmaster; |
1069 | unsigned int flags; | |
1070 | long ret; | |
fb1d9738 JB |
1071 | |
1072 | /* | |
e1f78003 | 1073 | * Do extra checking on driver private ioctls. |
fb1d9738 JB |
1074 | */ |
1075 | ||
1076 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) | |
1077 | && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { | |
baa70943 | 1078 | const struct drm_ioctl_desc *ioctl = |
64190bde | 1079 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
fb1d9738 | 1080 | |
7e7392a6 | 1081 | if (unlikely(ioctl->cmd != cmd)) { |
fb1d9738 JB |
1082 | DRM_ERROR("Invalid command format, ioctl %d\n", |
1083 | nr - DRM_COMMAND_BASE); | |
1084 | return -EINVAL; | |
1085 | } | |
64190bde TH |
1086 | flags = ioctl->flags; |
1087 | } else if (!drm_ioctl_flags(nr, &flags)) | |
1088 | return -EINVAL; | |
1089 | ||
1090 | vmaster = vmw_master_check(dev, file_priv, flags); | |
1091 | if (unlikely(IS_ERR(vmaster))) { | |
e338c4c2 TH |
1092 | ret = PTR_ERR(vmaster); |
1093 | ||
1094 | if (ret != -ERESTARTSYS) | |
1095 | DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", | |
1096 | nr, ret); | |
1097 | return ret; | |
fb1d9738 JB |
1098 | } |
1099 | ||
64190bde TH |
1100 | ret = ioctl_func(filp, cmd, arg); |
1101 | if (vmaster) | |
1102 | ttm_read_unlock(&vmaster->lock); | |
1103 | ||
1104 | return ret; | |
1105 | } | |
1106 | ||
1107 | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |
1108 | unsigned long arg) | |
1109 | { | |
1110 | return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); | |
fb1d9738 JB |
1111 | } |
1112 | ||
64190bde TH |
1113 | #ifdef CONFIG_COMPAT |
1114 | static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, | |
1115 | unsigned long arg) | |
1116 | { | |
1117 | return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); | |
1118 | } | |
1119 | #endif | |
1120 | ||
fb1d9738 JB |
1121 | static void vmw_lastclose(struct drm_device *dev) |
1122 | { | |
fb1d9738 JB |
1123 | struct drm_crtc *crtc; |
1124 | struct drm_mode_set set; | |
1125 | int ret; | |
1126 | ||
fb1d9738 JB |
1127 | set.x = 0; |
1128 | set.y = 0; | |
1129 | set.fb = NULL; | |
1130 | set.mode = NULL; | |
1131 | set.connectors = NULL; | |
1132 | set.num_connectors = 0; | |
1133 | ||
1134 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
1135 | set.crtc = crtc; | |
2d13b679 | 1136 | ret = drm_mode_set_config_internal(&set); |
fb1d9738 JB |
1137 | WARN_ON(ret != 0); |
1138 | } | |
1139 | ||
1140 | } | |
1141 | ||
1142 | static void vmw_master_init(struct vmw_master *vmaster) | |
1143 | { | |
1144 | ttm_lock_init(&vmaster->lock); | |
1145 | } | |
1146 | ||
1147 | static int vmw_master_create(struct drm_device *dev, | |
1148 | struct drm_master *master) | |
1149 | { | |
1150 | struct vmw_master *vmaster; | |
1151 | ||
fb1d9738 JB |
1152 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); |
1153 | if (unlikely(vmaster == NULL)) | |
1154 | return -ENOMEM; | |
1155 | ||
3a939a5e | 1156 | vmw_master_init(vmaster); |
fb1d9738 JB |
1157 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
1158 | master->driver_priv = vmaster; | |
1159 | ||
1160 | return 0; | |
1161 | } | |
1162 | ||
1163 | static void vmw_master_destroy(struct drm_device *dev, | |
1164 | struct drm_master *master) | |
1165 | { | |
1166 | struct vmw_master *vmaster = vmw_master(master); | |
1167 | ||
fb1d9738 JB |
1168 | master->driver_priv = NULL; |
1169 | kfree(vmaster); | |
1170 | } | |
1171 | ||
1172 | ||
1173 | static int vmw_master_set(struct drm_device *dev, | |
1174 | struct drm_file *file_priv, | |
1175 | bool from_open) | |
1176 | { | |
1177 | struct vmw_private *dev_priv = vmw_priv(dev); | |
1178 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | |
1179 | struct vmw_master *active = dev_priv->active_master; | |
1180 | struct vmw_master *vmaster = vmw_master(file_priv->master); | |
1181 | int ret = 0; | |
1182 | ||
fb1d9738 JB |
1183 | if (active) { |
1184 | BUG_ON(active != &dev_priv->fbdev_master); | |
1185 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | |
1186 | if (unlikely(ret != 0)) | |
153b3d5b | 1187 | return ret; |
fb1d9738 JB |
1188 | |
1189 | ttm_lock_set_kill(&active->lock, true, SIGTERM); | |
fb1d9738 JB |
1190 | dev_priv->active_master = NULL; |
1191 | } | |
1192 | ||
1193 | ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); | |
1194 | if (!from_open) { | |
1195 | ttm_vt_unlock(&vmaster->lock); | |
1196 | BUG_ON(vmw_fp->locked_master != file_priv->master); | |
1197 | drm_master_put(&vmw_fp->locked_master); | |
1198 | } | |
1199 | ||
1200 | dev_priv->active_master = vmaster; | |
1201 | ||
1202 | return 0; | |
fb1d9738 JB |
1203 | } |
1204 | ||
1205 | static void vmw_master_drop(struct drm_device *dev, | |
1206 | struct drm_file *file_priv, | |
1207 | bool from_release) | |
1208 | { | |
1209 | struct vmw_private *dev_priv = vmw_priv(dev); | |
1210 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | |
1211 | struct vmw_master *vmaster = vmw_master(file_priv->master); | |
1212 | int ret; | |
1213 | ||
fb1d9738 JB |
1214 | /** |
1215 | * Make sure the master doesn't disappear while we have | |
1216 | * it locked. | |
1217 | */ | |
1218 | ||
1219 | vmw_fp->locked_master = drm_master_get(file_priv->master); | |
1220 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | |
fb1d9738 JB |
1221 | if (unlikely((ret != 0))) { |
1222 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | |
1223 | drm_master_put(&vmw_fp->locked_master); | |
1224 | } | |
1225 | ||
c4249855 | 1226 | ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); |
fb1d9738 | 1227 | |
153b3d5b TH |
1228 | if (!dev_priv->enable_fb) |
1229 | vmw_svga_disable(dev_priv); | |
30c78bb8 | 1230 | |
fb1d9738 JB |
1231 | dev_priv->active_master = &dev_priv->fbdev_master; |
1232 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | |
1233 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | |
1234 | ||
30c78bb8 TH |
1235 | if (dev_priv->enable_fb) |
1236 | vmw_fb_on(dev_priv); | |
fb1d9738 JB |
1237 | } |
1238 | ||
153b3d5b TH |
1239 | /** |
1240 | * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. | |
1241 | * | |
1242 | * @dev_priv: Pointer to device private struct. | |
1243 | * Needs the reservation sem to be held in non-exclusive mode. | |
1244 | */ | |
1245 | void __vmw_svga_enable(struct vmw_private *dev_priv) | |
1246 | { | |
1247 | spin_lock(&dev_priv->svga_lock); | |
1248 | if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) { | |
1249 | vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); | |
1250 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = true; | |
1251 | } | |
1252 | spin_unlock(&dev_priv->svga_lock); | |
1253 | } | |
1254 | ||
1255 | /** | |
1256 | * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. | |
1257 | * | |
1258 | * @dev_priv: Pointer to device private struct. | |
1259 | */ | |
1260 | void vmw_svga_enable(struct vmw_private *dev_priv) | |
1261 | { | |
1262 | ttm_read_lock(&dev_priv->reservation_sem, false); | |
1263 | __vmw_svga_enable(dev_priv); | |
1264 | ttm_read_unlock(&dev_priv->reservation_sem); | |
1265 | } | |
1266 | ||
1267 | /** | |
1268 | * __vmw_svga_disable - Disable SVGA mode and use of VRAM. | |
1269 | * | |
1270 | * @dev_priv: Pointer to device private struct. | |
1271 | * Needs the reservation sem to be held in exclusive mode. | |
1272 | * Will not empty VRAM. VRAM must be emptied by caller. | |
1273 | */ | |
1274 | void __vmw_svga_disable(struct vmw_private *dev_priv) | |
1275 | { | |
1276 | spin_lock(&dev_priv->svga_lock); | |
1277 | if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { | |
1278 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; | |
1279 | vmw_write(dev_priv, SVGA_REG_ENABLE, | |
1280 | SVGA_REG_ENABLE_ENABLE_HIDE); | |
1281 | } | |
1282 | spin_unlock(&dev_priv->svga_lock); | |
1283 | } | |
1284 | ||
1285 | /** | |
1286 | * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo | |
1287 | * running. | |
1288 | * | |
1289 | * @dev_priv: Pointer to device private struct. | |
1290 | * Will empty VRAM. | |
1291 | */ | |
1292 | void vmw_svga_disable(struct vmw_private *dev_priv) | |
1293 | { | |
1294 | ttm_write_lock(&dev_priv->reservation_sem, false); | |
1295 | spin_lock(&dev_priv->svga_lock); | |
1296 | if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { | |
1297 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; | |
1298 | vmw_write(dev_priv, SVGA_REG_ENABLE, | |
1299 | SVGA_REG_ENABLE_ENABLE_HIDE); | |
1300 | spin_unlock(&dev_priv->svga_lock); | |
1301 | if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) | |
1302 | DRM_ERROR("Failed evicting VRAM buffers.\n"); | |
1303 | } else | |
1304 | spin_unlock(&dev_priv->svga_lock); | |
1305 | ttm_write_unlock(&dev_priv->reservation_sem); | |
1306 | } | |
fb1d9738 JB |
1307 | |
1308 | static void vmw_remove(struct pci_dev *pdev) | |
1309 | { | |
1310 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1311 | ||
fd3e4d6e | 1312 | pci_disable_device(pdev); |
fb1d9738 JB |
1313 | drm_put_dev(dev); |
1314 | } | |
1315 | ||
d9f36a00 TH |
1316 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
1317 | void *ptr) | |
1318 | { | |
1319 | struct vmw_private *dev_priv = | |
1320 | container_of(nb, struct vmw_private, pm_nb); | |
d9f36a00 TH |
1321 | |
1322 | switch (val) { | |
1323 | case PM_HIBERNATION_PREPARE: | |
294adf7d | 1324 | ttm_suspend_lock(&dev_priv->reservation_sem); |
d9f36a00 | 1325 | |
153b3d5b | 1326 | /* |
d9f36a00 TH |
1327 | * This empties VRAM and unbinds all GMR bindings. |
1328 | * Buffer contents are moved to swappable memory. | |
1329 | */ | |
c0951b79 TH |
1330 | vmw_execbuf_release_pinned_bo(dev_priv); |
1331 | vmw_resource_evict_all(dev_priv); | |
153b3d5b | 1332 | vmw_release_device_early(dev_priv); |
d9f36a00 | 1333 | ttm_bo_swapout_all(&dev_priv->bdev); |
153b3d5b | 1334 | vmw_fence_fifo_down(dev_priv->fman); |
d9f36a00 TH |
1335 | break; |
1336 | case PM_POST_HIBERNATION: | |
094e0fa8 | 1337 | case PM_POST_RESTORE: |
153b3d5b | 1338 | vmw_fence_fifo_up(dev_priv->fman); |
294adf7d | 1339 | ttm_suspend_unlock(&dev_priv->reservation_sem); |
094e0fa8 | 1340 | |
d9f36a00 TH |
1341 | break; |
1342 | case PM_RESTORE_PREPARE: | |
1343 | break; | |
d9f36a00 TH |
1344 | default: |
1345 | break; | |
1346 | } | |
1347 | return 0; | |
1348 | } | |
1349 | ||
7fbd721a | 1350 | static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) |
d9f36a00 | 1351 | { |
094e0fa8 TH |
1352 | struct drm_device *dev = pci_get_drvdata(pdev); |
1353 | struct vmw_private *dev_priv = vmw_priv(dev); | |
1354 | ||
153b3d5b | 1355 | if (dev_priv->refuse_hibernation) |
094e0fa8 | 1356 | return -EBUSY; |
094e0fa8 | 1357 | |
d9f36a00 TH |
1358 | pci_save_state(pdev); |
1359 | pci_disable_device(pdev); | |
1360 | pci_set_power_state(pdev, PCI_D3hot); | |
1361 | return 0; | |
1362 | } | |
1363 | ||
7fbd721a | 1364 | static int vmw_pci_resume(struct pci_dev *pdev) |
d9f36a00 TH |
1365 | { |
1366 | pci_set_power_state(pdev, PCI_D0); | |
1367 | pci_restore_state(pdev); | |
1368 | return pci_enable_device(pdev); | |
1369 | } | |
1370 | ||
7fbd721a TH |
1371 | static int vmw_pm_suspend(struct device *kdev) |
1372 | { | |
1373 | struct pci_dev *pdev = to_pci_dev(kdev); | |
1374 | struct pm_message dummy; | |
1375 | ||
1376 | dummy.event = 0; | |
1377 | ||
1378 | return vmw_pci_suspend(pdev, dummy); | |
1379 | } | |
1380 | ||
1381 | static int vmw_pm_resume(struct device *kdev) | |
1382 | { | |
1383 | struct pci_dev *pdev = to_pci_dev(kdev); | |
1384 | ||
1385 | return vmw_pci_resume(pdev); | |
1386 | } | |
1387 | ||
153b3d5b | 1388 | static int vmw_pm_freeze(struct device *kdev) |
7fbd721a TH |
1389 | { |
1390 | struct pci_dev *pdev = to_pci_dev(kdev); | |
1391 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1392 | struct vmw_private *dev_priv = vmw_priv(dev); | |
1393 | ||
7fbd721a TH |
1394 | dev_priv->suspended = true; |
1395 | if (dev_priv->enable_fb) | |
153b3d5b | 1396 | vmw_fifo_resource_dec(dev_priv); |
7fbd721a | 1397 | |
153b3d5b TH |
1398 | if (atomic_read(&dev_priv->num_fifo_resources) != 0) { |
1399 | DRM_ERROR("Can't hibernate while 3D resources are active.\n"); | |
7fbd721a | 1400 | if (dev_priv->enable_fb) |
153b3d5b TH |
1401 | vmw_fifo_resource_inc(dev_priv); |
1402 | WARN_ON(vmw_request_device_late(dev_priv)); | |
7fbd721a TH |
1403 | dev_priv->suspended = false; |
1404 | return -EBUSY; | |
1405 | } | |
1406 | ||
153b3d5b TH |
1407 | if (dev_priv->enable_fb) |
1408 | __vmw_svga_disable(dev_priv); | |
1409 | ||
1410 | vmw_release_device_late(dev_priv); | |
1411 | ||
7fbd721a TH |
1412 | return 0; |
1413 | } | |
1414 | ||
153b3d5b | 1415 | static int vmw_pm_restore(struct device *kdev) |
7fbd721a TH |
1416 | { |
1417 | struct pci_dev *pdev = to_pci_dev(kdev); | |
1418 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1419 | struct vmw_private *dev_priv = vmw_priv(dev); | |
153b3d5b | 1420 | int ret; |
7fbd721a | 1421 | |
95e8f6a2 TH |
1422 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
1423 | (void) vmw_read(dev_priv, SVGA_REG_ID); | |
95e8f6a2 | 1424 | |
7fbd721a | 1425 | if (dev_priv->enable_fb) |
153b3d5b TH |
1426 | vmw_fifo_resource_inc(dev_priv); |
1427 | ||
1428 | ret = vmw_request_device(dev_priv); | |
1429 | if (ret) | |
1430 | return ret; | |
1431 | ||
1432 | if (dev_priv->enable_fb) | |
1433 | __vmw_svga_enable(dev_priv); | |
7fbd721a TH |
1434 | |
1435 | dev_priv->suspended = false; | |
153b3d5b TH |
1436 | |
1437 | return 0; | |
7fbd721a TH |
1438 | } |
1439 | ||
1440 | static const struct dev_pm_ops vmw_pm_ops = { | |
153b3d5b TH |
1441 | .freeze = vmw_pm_freeze, |
1442 | .thaw = vmw_pm_restore, | |
1443 | .restore = vmw_pm_restore, | |
7fbd721a TH |
1444 | .suspend = vmw_pm_suspend, |
1445 | .resume = vmw_pm_resume, | |
1446 | }; | |
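/*
 * Editorial note: in struct dev_pm_ops, .suspend/.resume handle
 * suspend-to-RAM while .freeze/.thaw/.restore are the hibernation callbacks,
 * which is why the heavier vmw_pm_freeze/vmw_pm_restore teardown is wired up
 * only for the hibernation path.
 */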
1447 | ||
e08e96de AV |
1448 | static const struct file_operations vmwgfx_driver_fops = { |
1449 | .owner = THIS_MODULE, | |
1450 | .open = drm_open, | |
1451 | .release = drm_release, | |
1452 | .unlocked_ioctl = vmw_unlocked_ioctl, | |
1453 | .mmap = vmw_mmap, | |
1454 | .poll = vmw_fops_poll, | |
1455 | .read = vmw_fops_read, | |
e08e96de | 1456 | #if defined(CONFIG_COMPAT) |
64190bde | 1457 | .compat_ioctl = vmw_compat_ioctl, |
e08e96de AV |
1458 | #endif |
1459 | .llseek = noop_llseek, | |
1460 | }; | |
1461 | ||
fb1d9738 JB |
1462 | static struct drm_driver driver = { |
1463 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | | |
03f80263 | 1464 | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER, |
fb1d9738 JB |
1465 | .load = vmw_driver_load, |
1466 | .unload = vmw_driver_unload, | |
fb1d9738 JB |
1467 | .lastclose = vmw_lastclose, |
1468 | .irq_preinstall = vmw_irq_preinstall, | |
1469 | .irq_postinstall = vmw_irq_postinstall, | |
1470 | .irq_uninstall = vmw_irq_uninstall, | |
1471 | .irq_handler = vmw_irq_handler, | |
7a1c2f6c | 1472 | .get_vblank_counter = vmw_get_vblank_counter, |
1c482ab3 JB |
1473 | .enable_vblank = vmw_enable_vblank, |
1474 | .disable_vblank = vmw_disable_vblank, | |
fb1d9738 | 1475 | .ioctls = vmw_ioctls, |
f95aeb17 | 1476 | .num_ioctls = ARRAY_SIZE(vmw_ioctls), |
fb1d9738 JB |
1477 | .master_create = vmw_master_create, |
1478 | .master_destroy = vmw_master_destroy, | |
1479 | .master_set = vmw_master_set, | |
1480 | .master_drop = vmw_master_drop, | |
1481 | .open = vmw_driver_open, | |
6b82ef50 | 1482 | .preclose = vmw_preclose, |
fb1d9738 | 1483 | .postclose = vmw_postclose, |
915b4d11 | 1484 | .set_busid = drm_pci_set_busid, |
5e1782d2 DA |
1485 | |
1486 | .dumb_create = vmw_dumb_create, | |
1487 | .dumb_map_offset = vmw_dumb_map_offset, | |
1488 | .dumb_destroy = vmw_dumb_destroy, | |
1489 | ||
69977ff5 TH |
1490 | .prime_fd_to_handle = vmw_prime_fd_to_handle, |
1491 | .prime_handle_to_fd = vmw_prime_handle_to_fd, | |
1492 | ||
e08e96de | 1493 | .fops = &vmwgfx_driver_fops, |
fb1d9738 JB |
1494 | .name = VMWGFX_DRIVER_NAME, |
1495 | .desc = VMWGFX_DRIVER_DESC, | |
1496 | .date = VMWGFX_DRIVER_DATE, | |
1497 | .major = VMWGFX_DRIVER_MAJOR, | |
1498 | .minor = VMWGFX_DRIVER_MINOR, | |
1499 | .patchlevel = VMWGFX_DRIVER_PATCHLEVEL | |
1500 | }; | |
1501 | ||
8410ea3b DA |
1502 | static struct pci_driver vmw_pci_driver = { |
1503 | .name = VMWGFX_DRIVER_NAME, | |
1504 | .id_table = vmw_pci_id_list, | |
1505 | .probe = vmw_probe, | |
1506 | .remove = vmw_remove, | |
1507 | .driver = { | |
1508 | .pm = &vmw_pm_ops | |
1509 | } | |
1510 | }; | |
1511 | ||
fb1d9738 JB |
1512 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1513 | { | |
dcdb1674 | 1514 | return drm_get_pci_dev(pdev, ent, &driver); |
fb1d9738 JB |
1515 | } |
1516 | ||
1517 | static int __init vmwgfx_init(void) | |
1518 | { | |
1519 | int ret; | |
8410ea3b | 1520 | ret = drm_pci_init(&driver, &vmw_pci_driver); |
fb1d9738 JB |
1521 | if (ret) |
1522 | DRM_ERROR("Failed initializing DRM.\n"); | |
1523 | return ret; | |
1524 | } | |
1525 | ||
1526 | static void __exit vmwgfx_exit(void) | |
1527 | { | |
8410ea3b | 1528 | drm_pci_exit(&driver, &vmw_pci_driver); |
fb1d9738 JB |
1529 | } |
1530 | ||
1531 | module_init(vmwgfx_init); | |
1532 | module_exit(vmwgfx_exit); | |
1533 | ||
1534 | MODULE_AUTHOR("VMware Inc. and others"); | |
1535 | MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); | |
1536 | MODULE_LICENSE("GPL and additional rights"); | |
73558ead TH |
1537 | MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." |
1538 | __stringify(VMWGFX_DRIVER_MINOR) "." | |
1539 | __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." | |
1540 | "0"); |