Commit | Line | Data |
---|---|---|
dff96888 | 1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
fb1d9738 JB |
2 | /************************************************************************** |
3 | * | |
09881d29 | 4 | * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA |
fb1d9738 JB |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the | |
8 | * "Software"), to deal in the Software without restriction, including | |
9 | * without limitation the rights to use, copy, modify, merge, publish, | |
10 | * distribute, sub license, and/or sell copies of the Software, and to | |
11 | * permit persons to whom the Software is furnished to do so, subject to | |
12 | * the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice (including the | |
15 | * next paragraph) shall be included in all copies or substantial portions | |
16 | * of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | **************************************************************************/ | |
09881d29 ZR |
27 | #include "vmwgfx_bo.h" |
28 | #include "vmwgfx_drv.h" | |
29 | #include "vmwgfx_devcaps.h" | |
6ae8748b | 30 | |
760285e7 | 31 | #include <drm/ttm/ttm_placement.h> |
fb1d9738 | 32 | |
09881d29 | 33 | #include <linux/sched/signal.h> |
0069455b | 34 | #include <linux/vmalloc.h> |
6ae8748b | 35 | |
8426ed9c | 36 | bool vmw_supports_3d(struct vmw_private *dev_priv) |
8e19a951 | 37 | { |
8e19a951 | 38 | uint32_t fifo_min, hwversion; |
2cd80dbd | 39 | const struct vmw_fifo_state *fifo = dev_priv->fifo; |
8e19a951 | 40 | |
d8c08b2b TH |
41 | if (!(dev_priv->capabilities & SVGA_CAP_3D)) |
42 | return false; | |
43 | ||
44 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | |
45 | uint32_t result; | |
46 | ||
47 | if (!dev_priv->has_mob) | |
48 | return false; | |
49 | ||
d92223ea | 50 | result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D); |
d8c08b2b TH |
51 | |
52 | return (result != 0); | |
53 | } | |
54 | ||
d7e1958d JB |
55 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) |
56 | return false; | |
57 | ||
2cd80dbd ZR |
58 | BUG_ON(vmw_is_svga_v3(dev_priv)); |
59 | ||
be4f77ac | 60 | fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); |
8e19a951 JB |
61 | if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) |
62 | return false; | |
63 | ||
be4f77ac | 64 | hwversion = vmw_fifo_mem_read(dev_priv, |
8426ed9c ZR |
65 | ((fifo->capabilities & |
66 | SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? | |
67 | SVGA_FIFO_3D_HWVERSION_REVISED : | |
68 | SVGA_FIFO_3D_HWVERSION)); | |
ebd4c6f6 | 69 | |
8e19a951 JB |
70 | if (hwversion == 0) |
71 | return false; | |
72 | ||
b7b70024 | 73 | if (hwversion < SVGA3D_HWVERSION_WS8_B1) |
8e19a951 JB |
74 | return false; |
75 | ||
c8261a96 SY |
76 | /* Legacy Display Unit does not support surfaces */ |
77 | if (dev_priv->active_display_unit == vmw_du_legacy) | |
01e81419 JB |
78 | return false; |
79 | ||
8e19a951 JB |
80 | return true; |
81 | } | |
82 | ||
d7e1958d JB |
83 | bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) |
84 | { | |
d7e1958d JB |
85 | uint32_t caps; |
86 | ||
87 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | |
88 | return false; | |
89 | ||
be4f77ac | 90 | caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES); |
d7e1958d JB |
91 | if (caps & SVGA_FIFO_CAP_PITCHLOCK) |
92 | return true; | |
93 | ||
94 | return false; | |
95 | } | |
96 | ||
2cd80dbd | 97 | struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv) |
fb1d9738 | 98 | { |
2cd80dbd | 99 | struct vmw_fifo_state *fifo; |
fb1d9738 JB |
100 | uint32_t max; |
101 | uint32_t min; | |
fb1d9738 | 102 | |
2cd80dbd ZR |
103 | if (!dev_priv->fifo_mem) |
104 | return NULL; | |
105 | ||
106 | fifo = kzalloc(sizeof(*fifo), GFP_KERNEL); | |
2f70cbf7 CIK |
107 | if (!fifo) |
108 | return ERR_PTR(-ENOMEM); | |
fb1d9738 JB |
109 | fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; |
110 | fifo->static_buffer = vmalloc(fifo->static_buffer_size); | |
2f70cbf7 CIK |
111 | if (unlikely(fifo->static_buffer == NULL)) { |
112 | kfree(fifo); | |
2cd80dbd | 113 | return ERR_PTR(-ENOMEM); |
2f70cbf7 | 114 | } |
fb1d9738 | 115 | |
fb1d9738 JB |
116 | fifo->dynamic_buffer = NULL; |
117 | fifo->reserved_size = 0; | |
118 | fifo->using_bounce_buffer = false; | |
119 | ||
85b9e487 | 120 | mutex_init(&fifo->fifo_mutex); |
fb1d9738 | 121 | init_rwsem(&fifo->rwsem); |
fb1d9738 JB |
122 | min = 4; |
123 | if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) | |
124 | min = vmw_read(dev_priv, SVGA_REG_MEM_REGS); | |
125 | min <<= 2; | |
126 | ||
127 | if (min < PAGE_SIZE) | |
128 | min = PAGE_SIZE; | |
129 | ||
be4f77ac ZR |
130 | vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min); |
131 | vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size); | |
fb1d9738 | 132 | wmb(); |
be4f77ac ZR |
133 | vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min); |
134 | vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min); | |
135 | vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0); | |
fb1d9738 JB |
136 | mb(); |
137 | ||
138 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); | |
fb1d9738 | 139 | |
be4f77ac ZR |
140 | max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); |
141 | min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); | |
142 | fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES); | |
fb1d9738 | 143 | |
2b273544 ZR |
144 | drm_info(&dev_priv->drm, |
145 | "Fifo max 0x%08x min 0x%08x cap 0x%08x\n", | |
fb1d9738 JB |
146 | (unsigned int) max, |
147 | (unsigned int) min, | |
148 | (unsigned int) fifo->capabilities); | |
c451af78 ZR |
149 | |
150 | if (unlikely(min >= max)) { | |
151 | drm_warn(&dev_priv->drm, | |
152 | "FIFO memory is not usable. Driver failed to initialize."); | |
153 | return ERR_PTR(-ENXIO); | |
154 | } | |
155 | ||
2cd80dbd | 156 | return fifo; |
fb1d9738 JB |
157 | } |
158 | ||
/**
 * vmw_fifo_ping_host - Ask the host to start processing FIFO commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @reason: SVGA_SYNC_* reason code written to the SYNC register.
 *
 * The cmpxchg atomically transitions SVGA_FIFO_BUSY 0 -> 1 so that only
 * the first caller after the host goes idle issues the (expensive) SYNC
 * register write; the host clears BUSY again when it is done.
 */
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->fifo_mem;
	if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);

}
166 | ||
2cd80dbd | 167 | void vmw_fifo_destroy(struct vmw_private *dev_priv) |
fb1d9738 | 168 | { |
2cd80dbd | 169 | struct vmw_fifo_state *fifo = dev_priv->fifo; |
fb1d9738 | 170 | |
2cd80dbd ZR |
171 | if (!fifo) |
172 | return; | |
fb1d9738 | 173 | |
fb1d9738 JB |
174 | if (likely(fifo->static_buffer != NULL)) { |
175 | vfree(fifo->static_buffer); | |
176 | fifo->static_buffer = NULL; | |
177 | } | |
178 | ||
179 | if (likely(fifo->dynamic_buffer != NULL)) { | |
180 | vfree(fifo->dynamic_buffer); | |
181 | fifo->dynamic_buffer = NULL; | |
182 | } | |
2cd80dbd ZR |
183 | kfree(fifo); |
184 | dev_priv->fifo = NULL; | |
fb1d9738 JB |
185 | } |
186 | ||
187 | static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) | |
188 | { | |
be4f77ac ZR |
189 | uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); |
190 | uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); | |
191 | uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); | |
192 | uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP); | |
fb1d9738 JB |
193 | |
194 | return ((max - next_cmd) + (stop - min) <= bytes); | |
195 | } | |
196 | ||
/*
 * vmw_fifo_wait_noirq - Poll for FIFO space when the device cannot
 * raise progress interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether a pending signal aborts the wait.
 * @timeout: Maximum wait, in jiffies.
 *
 * Returns 0 on success, -EBUSY on timeout (suspected device lockup)
 * or -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		/*
		 * prepare_to_wait() before re-checking the condition
		 * avoids the lost-wakeup race with wake_up_all().
		 */
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		/* Sleep one tick between polls of the FIFO registers. */
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	/* Let any other waiters re-evaluate the FIFO state as well. */
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
229 | ||
/*
 * vmw_fifo_wait - Wait until @bytes of FIFO command space is available.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether a pending signal aborts the wait.
 * @timeout: Maximum wait, in jiffies.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL so it drains the FIFO, then
 * either polls (no IRQ support) or sleeps on the fifo queue waiting
 * for a FIFO-progress interrupt.
 *
 * Returns 0 on success, -EBUSY on timeout, or -ERESTARTSYS if
 * interrupted by a signal.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	/* Kick the host so it starts consuming commands. */
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	/* Enable FIFO-progress interrupts while we sleep. */
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	/* wait_event_*_timeout: 0 = timeout, >0 = success, <0 = signal. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}
266 | ||
/*
 * Reserve @bytes number of bytes in the fifo.
 *
 * On success the fifo_mutex is held until the matching
 * vmw_local_fifo_commit() releases it. The returned pointer is either
 * a direct pointer into FIFO memory (in-place reservation) or one of
 * the driver-owned bounce buffers that gets copied into the FIFO at
 * commit time.
 *
 * This function will return NULL (error) on two conditions:
 * If it timeouts waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or null on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	u32 *fifo_mem = dev_priv->fifo_mem;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	/* Held across the reserve/commit pair; released on error below. */
	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);

	/* A request as large as the whole FIFO can never succeed. */
	if (unlikely(bytes >= (max - min)))
		goto out_err;

	/* Nested reservations are a driver bug. */
	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			/*
			 * Write pointer is ahead of the read pointer:
			 * in-place needs the run up to MAX (or exactly to
			 * MAX if the host has consumed past MIN).
			 */
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			/*
			 * Wrapped case: the free run ends just before the
			 * read pointer.
			 */
			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			/*
			 * Without FIFO_CAP_RESERVE, only single-word
			 * writes may go directly into FIFO memory.
			 */
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_fifo_mem_write(dev_priv,
							   SVGA_FIFO_RESERVED,
							   bytes);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}
364 | ||
8426ed9c | 365 | void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, |
d80efd5c | 366 | int ctx_id) |
3eab3d9e TH |
367 | { |
368 | void *ret; | |
369 | ||
370 | if (dev_priv->cman) | |
371 | ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, | |
d80efd5c TH |
372 | ctx_id, false, NULL); |
373 | else if (ctx_id == SVGA3D_INVALID_ID) | |
3eab3d9e | 374 | ret = vmw_local_fifo_reserve(dev_priv, bytes); |
d80efd5c | 375 | else { |
99f9be4c | 376 | WARN(1, "Command buffer has not been allocated.\n"); |
d80efd5c TH |
377 | ret = NULL; |
378 | } | |
11c45419 | 379 | if (IS_ERR_OR_NULL(ret)) |
3eab3d9e | 380 | return NULL; |
3eab3d9e TH |
381 | |
382 | return ret; | |
383 | } | |
384 | ||
/*
 * vmw_fifo_res_copy - Copy a bounce buffer into FIFO memory using the
 * FIFO_CAP_RESERVE protocol.
 *
 * @fifo_state: FIFO software state holding the bounce buffer.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Current FIFO write offset (bytes).
 * @max: FIFO end offset (bytes).
 * @min: FIFO start offset (bytes).
 * @bytes: Number of bytes to copy.
 *
 * Announces the reservation size to the device, then copies the data
 * in at most two chunks: up to the end of the FIFO, then wrapped
 * around to its start.
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	/* RESERVED must be visible to the device before the data lands. */
	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
406 | ||
/*
 * vmw_fifo_slow_copy - Copy a bounce buffer into FIFO memory one
 * 32-bit word at a time.
 *
 * @fifo_state: FIFO software state holding the bounce buffer.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Current FIFO write offset (bytes).
 * @max: FIFO end offset (bytes).
 * @min: FIFO start offset (bytes).
 * @bytes: Number of bytes to copy.
 *
 * Fallback for devices without FIFO_CAP_RESERVE: NEXT_CMD is advanced
 * after every word, with barriers, so the device never observes a
 * partially written command word.
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		/* Wrap the write pointer at the end of the FIFO. */
		if (unlikely(next_cmd == max))
			next_cmd = min;
		/* Data word must be visible before NEXT_CMD advances. */
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
426 | ||
/*
 * vmw_local_fifo_commit - Commit space previously reserved with
 * vmw_local_fifo_reserve() and release the fifo_mutex taken there.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes actually used (may be less than reserved).
 *
 * Copies any bounce buffer into FIFO memory, advances NEXT_CMD,
 * clears the RESERVED register and pings the host.
 */
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	/* Commands must be whole 32-bit words, within the reservation. */
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		/* The dynamic bounce buffer is single-use. */
		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	/* The slow-copy path has already advanced NEXT_CMD itself. */
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	/* Pairs with the mutex_lock() in vmw_local_fifo_reserve(). */
	mutex_unlock(&fifo_state->fifo_mutex);
}
471 | ||
8426ed9c | 472 | void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes) |
3eab3d9e TH |
473 | { |
474 | if (dev_priv->cman) | |
475 | vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false); | |
476 | else | |
477 | vmw_local_fifo_commit(dev_priv, bytes); | |
478 | } | |
479 | ||
480 | ||
481 | /** | |
2cd80dbd | 482 | * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands. |
3eab3d9e TH |
483 | * |
484 | * @dev_priv: Pointer to device private structure. | |
485 | * @bytes: Number of bytes to commit. | |
486 | */ | |
8426ed9c | 487 | void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) |
3eab3d9e TH |
488 | { |
489 | if (dev_priv->cman) | |
490 | vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); | |
491 | else | |
492 | vmw_local_fifo_commit(dev_priv, bytes); | |
493 | } | |
494 | ||
495 | /** | |
2cd80dbd | 496 | * vmw_cmd_flush - Flush any buffered commands and make sure command processing |
3eab3d9e TH |
497 | * starts. |
498 | * | |
499 | * @dev_priv: Pointer to device private structure. | |
500 | * @interruptible: Whether to wait interruptible if function needs to sleep. | |
501 | */ | |
8426ed9c | 502 | int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible) |
3eab3d9e TH |
503 | { |
504 | might_sleep(); | |
505 | ||
506 | if (dev_priv->cman) | |
507 | return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible); | |
508 | else | |
509 | return 0; | |
510 | } | |
511 | ||
/**
 * vmw_cmd_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: Out parameter receiving the new fence sequence number. Always
 * set, even on failure (to the current marker_seq so callers can wait).
 *
 * Returns 0 on success or -ENOMEM if command space could not be
 * reserved, in which case a fallback wait has already been performed.
 */
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		/* Best effort: wait for the device to drain anyway. */
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	/* Sequence number 0 is reserved; skip it on wrap-around. */
	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!vmw_has_fences(dev_priv)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		/* Commit 0 bytes: releases the reservation unused. */
		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}
e2fa3a76 TH |
552 | |
553 | /** | |
2cd80dbd | 554 | * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using |
ddcda24e | 555 | * legacy query commands. |
e2fa3a76 TH |
556 | * |
557 | * @dev_priv: The device private structure. | |
558 | * @cid: The hardware context id used for the query. | |
559 | * | |
2cd80dbd | 560 | * See the vmw_cmd_emit_dummy_query documentation. |
e2fa3a76 | 561 | */ |
2cd80dbd | 562 | static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv, |
ddcda24e | 563 | uint32_t cid) |
e2fa3a76 TH |
564 | { |
565 | /* | |
566 | * A query wait without a preceding query end will | |
567 | * actually finish all queries for this cid | |
568 | * without writing to the query result structure. | |
569 | */ | |
570 | ||
668b2066 | 571 | struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo; |
e2fa3a76 TH |
572 | struct { |
573 | SVGA3dCmdHeader header; | |
574 | SVGA3dCmdWaitForQuery body; | |
575 | } *cmd; | |
576 | ||
8426ed9c | 577 | cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); |
11c45419 | 578 | if (unlikely(cmd == NULL)) |
e2fa3a76 | 579 | return -ENOMEM; |
e2fa3a76 TH |
580 | |
581 | cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY; | |
582 | cmd->header.size = sizeof(cmd->body); | |
583 | cmd->body.cid = cid; | |
584 | cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; | |
585 | ||
d3116756 | 586 | if (bo->resource->mem_type == TTM_PL_VRAM) { |
e2fa3a76 | 587 | cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER; |
d3116756 | 588 | cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT; |
e2fa3a76 | 589 | } else { |
d3116756 | 590 | cmd->body.guestResult.gmrId = bo->resource->start; |
e2fa3a76 TH |
591 | cmd->body.guestResult.offset = 0; |
592 | } | |
593 | ||
8426ed9c | 594 | vmw_cmd_commit(dev_priv, sizeof(*cmd)); |
e2fa3a76 TH |
595 | |
596 | return 0; | |
597 | } | |
ddcda24e TH |
598 | |
599 | /** | |
2cd80dbd | 600 | * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using |
ddcda24e TH |
601 | * guest-backed resource query commands. |
602 | * | |
603 | * @dev_priv: The device private structure. | |
604 | * @cid: The hardware context id used for the query. | |
605 | * | |
2cd80dbd | 606 | * See the vmw_cmd_emit_dummy_query documentation. |
ddcda24e | 607 | */ |
2cd80dbd ZR |
608 | static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv, |
609 | uint32_t cid) | |
ddcda24e TH |
610 | { |
611 | /* | |
612 | * A query wait without a preceding query end will | |
613 | * actually finish all queries for this cid | |
614 | * without writing to the query result structure. | |
615 | */ | |
616 | ||
668b2066 | 617 | struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo; |
ddcda24e TH |
618 | struct { |
619 | SVGA3dCmdHeader header; | |
620 | SVGA3dCmdWaitForGBQuery body; | |
621 | } *cmd; | |
622 | ||
8426ed9c | 623 | cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); |
11c45419 | 624 | if (unlikely(cmd == NULL)) |
ddcda24e | 625 | return -ENOMEM; |
ddcda24e TH |
626 | |
627 | cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | |
628 | cmd->header.size = sizeof(cmd->body); | |
629 | cmd->body.cid = cid; | |
630 | cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; | |
d3116756 CK |
631 | BUG_ON(bo->resource->mem_type != VMW_PL_MOB); |
632 | cmd->body.mobid = bo->resource->start; | |
ddcda24e TH |
633 | cmd->body.offset = 0; |
634 | ||
8426ed9c | 635 | vmw_cmd_commit(dev_priv, sizeof(*cmd)); |
ddcda24e TH |
636 | |
637 | return 0; | |
638 | } | |
639 | ||
640 | ||
641 | /** | |
2cd80dbd | 642 | * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using |
ddcda24e TH |
643 | * appropriate resource query commands. |
644 | * | |
645 | * @dev_priv: The device private structure. | |
646 | * @cid: The hardware context id used for the query. | |
647 | * | |
648 | * This function is used to emit a dummy occlusion query with | |
649 | * no primitives rendered between query begin and query end. | |
650 | * It's used to provide a query barrier, in order to know that when | |
651 | * this query is finished, all preceding queries are also finished. | |
652 | * | |
653 | * A Query results structure should have been initialized at the start | |
654 | * of the dev_priv->dummy_query_bo buffer object. And that buffer object | |
655 | * must also be either reserved or pinned when this function is called. | |
656 | * | |
657 | * Returns -ENOMEM on failure to reserve fifo space. | |
658 | */ | |
8426ed9c | 659 | int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv, |
ddcda24e TH |
660 | uint32_t cid) |
661 | { | |
662 | if (dev_priv->has_mob) | |
2cd80dbd | 663 | return vmw_cmd_emit_dummy_gb_query(dev_priv, cid); |
ddcda24e | 664 | |
2cd80dbd ZR |
665 | return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid); |
666 | } | |
667 | ||
668 | ||
669 | /** | |
670 | * vmw_cmd_supported - returns true if the given device supports | |
671 | * command queues. | |
672 | * | |
673 | * @vmw: The device private structure. | |
674 | * | |
675 | * Returns true if we can issue commands. | |
676 | */ | |
677 | bool vmw_cmd_supported(struct vmw_private *vmw) | |
678 | { | |
6f6f9788 ZR |
679 | bool has_cmdbufs = |
680 | (vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS | | |
681 | SVGA_CAP_CMD_BUFFERS_2)) != 0; | |
682 | if (vmw_is_svga_v3(vmw)) | |
683 | return (has_cmdbufs && | |
684 | (vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0); | |
2cd80dbd ZR |
685 | /* |
686 | * We have FIFO cmd's | |
687 | */ | |
6f6f9788 | 688 | return has_cmdbufs || vmw->fifo_mem != NULL; |
ddcda24e | 689 | } |