Commit | Line | Data |
---|---|---|
fb1d9738 JB |
1 | /************************************************************************** |
2 | * | |
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | |
4 | * All Rights Reserved. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the | |
8 | * "Software"), to deal in the Software without restriction, including | |
9 | * without limitation the rights to use, copy, modify, merge, publish, | |
10 | * distribute, sub license, and/or sell copies of the Software, and to | |
11 | * permit persons to whom the Software is furnished to do so, subject to | |
12 | * the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice (including the | |
15 | * next paragraph) shall be included in all copies or substantial portions | |
16 | * of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | **************************************************************************/ | |
27 | ||
28 | #include "vmwgfx_drv.h" | |
29 | #include "vmwgfx_reg.h" | |
30 | #include "ttm/ttm_bo_api.h" | |
31 | #include "ttm/ttm_placement.h" | |
32 | ||
/*
 * Command verifier for commands user-space is never allowed to submit.
 *
 * Note the unusual return: capable(CAP_SYS_ADMIN) evaluates to 1 for a
 * privileged caller, and the GNU "?:" extension then yields that 1.
 * Since callers treat any non-zero value as failure, the command is
 * rejected either way; only the returned code differs (1 vs -EINVAL).
 */
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}
39 | ||
/*
 * Command verifier for commands that are always permitted unmodified.
 * Always returns 0 (success).
 */
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
46 | ||
be38ab6e TH |
47 | |
/*
 * vmw_resource_to_validate_list - add a resource to the software
 * context's list of resources referenced by the command stream.
 *
 * @sw_context: Pointer to the software context.
 * @p_res:      Pointer to a referenced resource pointer.
 *
 * Reference ownership: on first-time addition the caller's reference is
 * handed over to the validate list (and kept until
 * vmw_clear_validations()). If the resource is already on the list, or
 * the list is full, the caller's reference is dropped via
 * vmw_resource_unreference(), which also clears *p_res.
 *
 * Returns 0 on success, -ENOMEM if the validate list is full.
 */
static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
					 struct vmw_resource **p_res)
{
	int ret = 0;
	struct vmw_resource *res = *p_res;

	if (!res->on_validate_list) {
		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
			DRM_ERROR("Too many resources referenced in "
				  "command stream.\n");
			ret = -ENOMEM;
			goto out;
		}
		/* Hand the caller's reference over to the validate list. */
		sw_context->resources[sw_context->num_ref_resources++] = res;
		res->on_validate_list = true;
		return 0;
	}

out:
	/* Already listed, or list full: drop the caller's reference. */
	vmw_resource_unreference(p_res);
	return ret;
}
70 | ||
fb1d9738 JB |
/*
 * vmw_cmd_cid_check - verify that the context id embedded in a command
 * refers to a context the calling file may use.
 *
 * Caches the last successfully looked-up context id in @sw_context so
 * consecutive commands against the same context skip the lookup. On a
 * cache miss the looked-up context resource is added to the validate
 * list, which consumes the reference taken by vmw_context_check().
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;

	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	/* Fast path: same context as the previous command. */
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
				&ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	/* The ctx reference is handed over to the validate list. */
	return vmw_resource_to_validate_list(sw_context, &ctx);
}
99 | ||
100 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | |
101 | struct vmw_sw_context *sw_context, | |
7a73ba74 | 102 | uint32_t *sid) |
fb1d9738 | 103 | { |
be38ab6e TH |
104 | struct vmw_surface *srf; |
105 | int ret; | |
106 | struct vmw_resource *res; | |
107 | ||
7a73ba74 TH |
108 | if (*sid == SVGA3D_INVALID_ID) |
109 | return 0; | |
110 | ||
be38ab6e TH |
111 | if (likely((sw_context->sid_valid && |
112 | *sid == sw_context->last_sid))) { | |
7a73ba74 | 113 | *sid = sw_context->sid_translation; |
be38ab6e TH |
114 | return 0; |
115 | } | |
7a73ba74 | 116 | |
be38ab6e TH |
117 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, |
118 | *sid, &srf); | |
119 | if (unlikely(ret != 0)) { | |
120 | DRM_ERROR("Could ot find or use surface 0x%08x " | |
121 | "address 0x%08lx\n", | |
122 | (unsigned int) *sid, | |
123 | (unsigned long) sid); | |
124 | return ret; | |
125 | } | |
126 | ||
127 | sw_context->last_sid = *sid; | |
128 | sw_context->sid_valid = true; | |
129 | sw_context->sid_translation = srf->res.id; | |
130 | *sid = sw_context->sid_translation; | |
131 | ||
132 | res = &srf->res; | |
133 | return vmw_resource_to_validate_list(sw_context, &res); | |
fb1d9738 JB |
134 | } |
135 | ||
136 | ||
137 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |
138 | struct vmw_sw_context *sw_context, | |
139 | SVGA3dCmdHeader *header) | |
140 | { | |
141 | struct vmw_sid_cmd { | |
142 | SVGA3dCmdHeader header; | |
143 | SVGA3dCmdSetRenderTarget body; | |
144 | } *cmd; | |
145 | int ret; | |
146 | ||
147 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | |
148 | if (unlikely(ret != 0)) | |
149 | return ret; | |
150 | ||
151 | cmd = container_of(header, struct vmw_sid_cmd, header); | |
7a73ba74 TH |
152 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); |
153 | return ret; | |
fb1d9738 JB |
154 | } |
155 | ||
156 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | |
157 | struct vmw_sw_context *sw_context, | |
158 | SVGA3dCmdHeader *header) | |
159 | { | |
160 | struct vmw_sid_cmd { | |
161 | SVGA3dCmdHeader header; | |
162 | SVGA3dCmdSurfaceCopy body; | |
163 | } *cmd; | |
164 | int ret; | |
165 | ||
166 | cmd = container_of(header, struct vmw_sid_cmd, header); | |
7a73ba74 | 167 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); |
fb1d9738 JB |
168 | if (unlikely(ret != 0)) |
169 | return ret; | |
7a73ba74 | 170 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); |
fb1d9738 JB |
171 | } |
172 | ||
173 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | |
174 | struct vmw_sw_context *sw_context, | |
175 | SVGA3dCmdHeader *header) | |
176 | { | |
177 | struct vmw_sid_cmd { | |
178 | SVGA3dCmdHeader header; | |
179 | SVGA3dCmdSurfaceStretchBlt body; | |
180 | } *cmd; | |
181 | int ret; | |
182 | ||
183 | cmd = container_of(header, struct vmw_sid_cmd, header); | |
7a73ba74 | 184 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); |
fb1d9738 JB |
185 | if (unlikely(ret != 0)) |
186 | return ret; | |
7a73ba74 | 187 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); |
fb1d9738 JB |
188 | } |
189 | ||
190 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |
191 | struct vmw_sw_context *sw_context, | |
192 | SVGA3dCmdHeader *header) | |
193 | { | |
194 | struct vmw_sid_cmd { | |
195 | SVGA3dCmdHeader header; | |
196 | SVGA3dCmdBlitSurfaceToScreen body; | |
197 | } *cmd; | |
198 | ||
199 | cmd = container_of(header, struct vmw_sid_cmd, header); | |
7a73ba74 | 200 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); |
fb1d9738 JB |
201 | } |
202 | ||
203 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |
204 | struct vmw_sw_context *sw_context, | |
205 | SVGA3dCmdHeader *header) | |
206 | { | |
207 | struct vmw_sid_cmd { | |
208 | SVGA3dCmdHeader header; | |
209 | SVGA3dCmdPresent body; | |
210 | } *cmd; | |
211 | ||
212 | cmd = container_of(header, struct vmw_sid_cmd, header); | |
7a73ba74 | 213 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
fb1d9738 JB |
214 | } |
215 | ||
4e4ddd47 TH |
216 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
217 | struct vmw_sw_context *sw_context, | |
218 | SVGAGuestPtr *ptr, | |
219 | struct vmw_dma_buffer **vmw_bo_p) | |
fb1d9738 | 220 | { |
fb1d9738 JB |
221 | struct vmw_dma_buffer *vmw_bo = NULL; |
222 | struct ttm_buffer_object *bo; | |
4e4ddd47 | 223 | uint32_t handle = ptr->gmrId; |
fb1d9738 | 224 | struct vmw_relocation *reloc; |
fb1d9738 JB |
225 | uint32_t cur_validate_node; |
226 | struct ttm_validate_buffer *val_buf; | |
4e4ddd47 | 227 | int ret; |
fb1d9738 | 228 | |
fb1d9738 JB |
229 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
230 | if (unlikely(ret != 0)) { | |
231 | DRM_ERROR("Could not find or use GMR region.\n"); | |
232 | return -EINVAL; | |
233 | } | |
234 | bo = &vmw_bo->base; | |
235 | ||
236 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | |
4e4ddd47 | 237 | DRM_ERROR("Max number relocations per submission" |
fb1d9738 JB |
238 | " exceeded\n"); |
239 | ret = -EINVAL; | |
240 | goto out_no_reloc; | |
241 | } | |
242 | ||
243 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | |
4e4ddd47 | 244 | reloc->location = ptr; |
fb1d9738 JB |
245 | |
246 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | |
be38ab6e | 247 | if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) { |
fb1d9738 JB |
248 | DRM_ERROR("Max number of DMA buffers per submission" |
249 | " exceeded.\n"); | |
250 | ret = -EINVAL; | |
251 | goto out_no_reloc; | |
252 | } | |
253 | ||
254 | reloc->index = cur_validate_node; | |
255 | if (unlikely(cur_validate_node == sw_context->cur_val_buf)) { | |
256 | val_buf = &sw_context->val_bufs[cur_validate_node]; | |
257 | val_buf->bo = ttm_bo_reference(bo); | |
dfadbbdb | 258 | val_buf->usage = TTM_USAGE_READWRITE; |
ae2a1040 | 259 | val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC; |
fb1d9738 JB |
260 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
261 | ++sw_context->cur_val_buf; | |
262 | } | |
4e4ddd47 TH |
263 | *vmw_bo_p = vmw_bo; |
264 | return 0; | |
265 | ||
266 | out_no_reloc: | |
267 | vmw_dmabuf_unreference(&vmw_bo); | |
268 | vmw_bo_p = NULL; | |
269 | return ret; | |
270 | } | |
271 | ||
272 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |
273 | struct vmw_sw_context *sw_context, | |
274 | SVGA3dCmdHeader *header) | |
275 | { | |
276 | struct vmw_dma_buffer *vmw_bo; | |
277 | struct vmw_query_cmd { | |
278 | SVGA3dCmdHeader header; | |
279 | SVGA3dCmdEndQuery q; | |
280 | } *cmd; | |
281 | int ret; | |
282 | ||
283 | cmd = container_of(header, struct vmw_query_cmd, header); | |
284 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | |
285 | if (unlikely(ret != 0)) | |
286 | return ret; | |
287 | ||
288 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | |
289 | &cmd->q.guestResult, | |
290 | &vmw_bo); | |
291 | if (unlikely(ret != 0)) | |
292 | return ret; | |
293 | ||
294 | vmw_dmabuf_unreference(&vmw_bo); | |
295 | return 0; | |
296 | } | |
fb1d9738 | 297 | |
4e4ddd47 TH |
298 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
299 | struct vmw_sw_context *sw_context, | |
300 | SVGA3dCmdHeader *header) | |
301 | { | |
302 | struct vmw_dma_buffer *vmw_bo; | |
303 | struct vmw_query_cmd { | |
304 | SVGA3dCmdHeader header; | |
305 | SVGA3dCmdWaitForQuery q; | |
306 | } *cmd; | |
307 | int ret; | |
308 | ||
309 | cmd = container_of(header, struct vmw_query_cmd, header); | |
310 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | |
311 | if (unlikely(ret != 0)) | |
312 | return ret; | |
313 | ||
314 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | |
315 | &cmd->q.guestResult, | |
316 | &vmw_bo); | |
317 | if (unlikely(ret != 0)) | |
318 | return ret; | |
319 | ||
320 | vmw_dmabuf_unreference(&vmw_bo); | |
321 | return 0; | |
322 | } | |
323 | ||
4e4ddd47 TH |
/*
 * vmw_cmd_dma - verify a SURFACE_DMA command.
 *
 * Translates the guest pointer (adding its backing DMA buffer to the
 * validate list), looks up the host surface by handle, patches the
 * command stream with the device surface id, and lets the KMS code
 * snoop the transfer for cursor updates. On success the surface
 * reference is handed over to the validate list.
 */
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	struct vmw_resource *res;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	/* Let KMS snoop cursor-surface DMA for software cursor support. */
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

	/* The validate list holds its own reference to the DMA buffer. */
	vmw_dmabuf_unreference(&vmw_bo);

	/* Hand the surface reference over to the validate list. */
	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
368 | ||
7a73ba74 TH |
/*
 * vmw_cmd_draw - verify a DRAW_PRIMITIVES command.
 *
 * The command body is followed by numVertexDecls vertex declarations
 * and then numRanges primitive ranges. Both counts are bounded against
 * the command size before the embedded surface ids are checked and
 * patched, so a malicious count cannot make the loops run past the
 * command.
 */
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	/* Vertex declarations start right after the fixed-size body. */
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	/* Upper bound on declarations that can fit in the command. */
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	/* Upper bound on ranges in the space left after the decls. */
	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	/* Ranges immediately follow the last vertex declaration. */
	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
419 | ||
420 | ||
421 | static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |
422 | struct vmw_sw_context *sw_context, | |
423 | SVGA3dCmdHeader *header) | |
424 | { | |
425 | struct vmw_tex_state_cmd { | |
426 | SVGA3dCmdHeader header; | |
427 | SVGA3dCmdSetTextureState state; | |
428 | }; | |
429 | ||
430 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) | |
431 | ((unsigned long) header + header->size + sizeof(header)); | |
432 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) | |
433 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); | |
434 | int ret; | |
435 | ||
436 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | |
437 | if (unlikely(ret != 0)) | |
438 | return ret; | |
439 | ||
440 | for (; cur_state < last_state; ++cur_state) { | |
441 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) | |
442 | continue; | |
443 | ||
444 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | |
445 | &cur_state->value); | |
446 | if (unlikely(ret != 0)) | |
447 | return ret; | |
448 | } | |
449 | ||
450 | return 0; | |
451 | } | |
452 | ||
4084fb89 JB |
453 | static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, |
454 | struct vmw_sw_context *sw_context, | |
455 | void *buf) | |
456 | { | |
457 | struct vmw_dma_buffer *vmw_bo; | |
458 | int ret; | |
459 | ||
460 | struct { | |
461 | uint32_t header; | |
462 | SVGAFifoCmdDefineGMRFB body; | |
463 | } *cmd = buf; | |
464 | ||
465 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | |
466 | &cmd->body.ptr, | |
467 | &vmw_bo); | |
468 | if (unlikely(ret != 0)) | |
469 | return ret; | |
470 | ||
471 | vmw_dmabuf_unreference(&vmw_bo); | |
472 | ||
473 | return ret; | |
474 | } | |
475 | ||
476 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | |
477 | struct vmw_sw_context *sw_context, | |
478 | void *buf, uint32_t *size) | |
479 | { | |
480 | uint32_t size_remaining = *size; | |
481 | bool need_kernel = true; | |
482 | uint32_t cmd_id; | |
483 | ||
484 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | |
485 | switch (cmd_id) { | |
486 | case SVGA_CMD_UPDATE: | |
487 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); | |
488 | need_kernel = false; | |
489 | break; | |
490 | case SVGA_CMD_DEFINE_GMRFB: | |
491 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB); | |
492 | break; | |
493 | case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: | |
494 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); | |
495 | break; | |
496 | case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: | |
497 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); | |
498 | break; | |
499 | default: | |
500 | DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id); | |
501 | return -EINVAL; | |
502 | } | |
503 | ||
504 | if (*size > size_remaining) { | |
505 | DRM_ERROR("Invalid SVGA command (size mismatch):" | |
506 | " %u.\n", cmd_id); | |
507 | return -EINVAL; | |
508 | } | |
509 | ||
510 | if (unlikely(need_kernel && !sw_context->kernel)) { | |
511 | DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id); | |
512 | return -EPERM; | |
513 | } | |
514 | ||
515 | if (cmd_id == SVGA_CMD_DEFINE_GMRFB) | |
516 | return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf); | |
517 | ||
518 | return 0; | |
519 | } | |
fb1d9738 JB |
520 | |
/* Signature shared by all 3D command verifiers. */
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

/* Index the dispatch table by command id relative to SVGA_3D_CMD_BASE. */
#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

/*
 * Dispatch table mapping each SVGA3D command id to its verifier.
 * NOTE(review): this assumes the id range SURFACE_DEFINE ..
 * BLIT_SURFACE_TO_SCREEN is dense; a gap would leave a NULL entry that
 * vmw_cmd_check() calls without a NULL check - confirm against the
 * SVGA3D command enum.
 */
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};
562 | ||
/*
 * vmw_cmd_check - verify a single command at @buf.
 *
 * On entry *size holds the bytes remaining in the stream; on return it
 * holds the exact size of the verified command so the caller can
 * advance. 2D commands (id below SVGA_CMD_MAX) are delegated to
 * vmw_cmd_check_not_3d(); 3D commands are range-checked and dispatched
 * through vmw_cmd_funcs[].
 */
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);


	cmd_id = le32_to_cpu(header->id);
	/* header->size excludes the header itself. */
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	/* The command must fit entirely in the remaining stream. */
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
598 | ||
599 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, | |
600 | struct vmw_sw_context *sw_context, | |
922ade0d | 601 | void *buf, |
be38ab6e | 602 | uint32_t size) |
fb1d9738 JB |
603 | { |
604 | int32_t cur_size = size; | |
605 | int ret; | |
606 | ||
607 | while (cur_size > 0) { | |
7a73ba74 | 608 | size = cur_size; |
fb1d9738 JB |
609 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); |
610 | if (unlikely(ret != 0)) | |
611 | return ret; | |
612 | buf = (void *)((unsigned long) buf + size); | |
613 | cur_size -= size; | |
614 | } | |
615 | ||
616 | if (unlikely(cur_size != 0)) { | |
617 | DRM_ERROR("Command verifier out of sync.\n"); | |
618 | return -EINVAL; | |
619 | } | |
620 | ||
621 | return 0; | |
622 | } | |
623 | ||
/* Discard all queued relocations (the reloc array is statically sized). */
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}
628 | ||
/*
 * vmw_apply_relocations - patch guest pointers in the command stream
 * with the final placement of each validated buffer, then discard the
 * relocation list.
 */
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			/* VRAM placement: framebuffer GMR plus byte offset. */
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			/* GMR placement: the GMR id is the buffer's start. */
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}
648 | ||
/*
 * vmw_clear_validations - drop all buffer and resource references
 * accumulated on the software context during command verification,
 * resetting it for the next submission.
 */
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;
	uint32_t i = sw_context->num_ref_resources;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	/* Every list entry must have been counted in cur_val_buf. */
	BUG_ON(sw_context->cur_val_buf != 0);

	/*
	 * Drop references to resources held during command submission.
	 */
	while (i-- > 0) {
		sw_context->resources[i]->on_validate_list = false;
		vmw_resource_unreference(&sw_context->resources[i]);
	}
}
674 | ||
/*
 * vmw_validate_single_buffer - choose and validate a placement for one
 * buffer object referenced by the command stream.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted, or the error from
 * the final VRAM validation attempt.
 */
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}
700 | ||
701 | ||
702 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | |
703 | struct vmw_sw_context *sw_context) | |
704 | { | |
705 | struct ttm_validate_buffer *entry; | |
706 | int ret; | |
707 | ||
708 | list_for_each_entry(entry, &sw_context->validate_nodes, head) { | |
709 | ret = vmw_validate_single_buffer(dev_priv, entry->bo); | |
710 | if (unlikely(ret != 0)) | |
711 | return ret; | |
712 | } | |
713 | return 0; | |
714 | } | |
715 | ||
be38ab6e TH |
716 | static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, |
717 | uint32_t size) | |
718 | { | |
719 | if (likely(sw_context->cmd_bounce_size >= size)) | |
720 | return 0; | |
721 | ||
722 | if (sw_context->cmd_bounce_size == 0) | |
723 | sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE; | |
724 | ||
725 | while (sw_context->cmd_bounce_size < size) { | |
726 | sw_context->cmd_bounce_size = | |
727 | PAGE_ALIGN(sw_context->cmd_bounce_size + | |
728 | (sw_context->cmd_bounce_size >> 1)); | |
729 | } | |
730 | ||
731 | if (sw_context->cmd_bounce != NULL) | |
732 | vfree(sw_context->cmd_bounce); | |
733 | ||
734 | sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); | |
735 | ||
736 | if (sw_context->cmd_bounce == NULL) { | |
737 | DRM_ERROR("Failed to allocate command bounce buffer.\n"); | |
738 | sw_context->cmd_bounce_size = 0; | |
739 | return -ENOMEM; | |
740 | } | |
741 | ||
742 | return 0; | |
743 | } | |
744 | ||
ae2a1040 TH |
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		/* Fence creation failed: fall back to a synchronous wait. */
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	/*
	 * NOTE(review): 0 is returned even when fence creation failed
	 * above; callers must tolerate *p_fence == NULL. Also, when the
	 * fifo fence already failed (synced == true) and creation then
	 * fails, *p_fence is not explicitly cleared here - confirm the
	 * create functions NULL it on failure.
	 */
	return 0;
}
793 | ||
922ade0d TH |
794 | int vmw_execbuf_process(struct drm_file *file_priv, |
795 | struct vmw_private *dev_priv, | |
796 | void __user *user_commands, | |
797 | void *kernel_commands, | |
798 | uint32_t command_size, | |
799 | uint64_t throttle_us, | |
800 | struct drm_vmw_fence_rep __user *user_fence_rep) | |
fb1d9738 | 801 | { |
fb1d9738 | 802 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
922ade0d | 803 | struct drm_vmw_fence_rep fence_rep; |
ae2a1040 TH |
804 | struct vmw_fence_obj *fence; |
805 | uint32_t handle; | |
922ade0d TH |
806 | void *cmd; |
807 | int ret; | |
fb1d9738 | 808 | |
922ade0d | 809 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); |
fb1d9738 | 810 | if (unlikely(ret != 0)) |
922ade0d | 811 | return -ERESTARTSYS; |
fb1d9738 | 812 | |
922ade0d TH |
813 | if (kernel_commands == NULL) { |
814 | sw_context->kernel = false; | |
fb1d9738 | 815 | |
922ade0d TH |
816 | ret = vmw_resize_cmd_bounce(sw_context, command_size); |
817 | if (unlikely(ret != 0)) | |
818 | goto out_unlock; | |
fb1d9738 | 819 | |
fb1d9738 | 820 | |
922ade0d TH |
821 | ret = copy_from_user(sw_context->cmd_bounce, |
822 | user_commands, command_size); | |
823 | ||
824 | if (unlikely(ret != 0)) { | |
825 | ret = -EFAULT; | |
826 | DRM_ERROR("Failed copying commands.\n"); | |
827 | goto out_unlock; | |
828 | } | |
829 | kernel_commands = sw_context->cmd_bounce; | |
830 | } else | |
831 | sw_context->kernel = true; | |
fb1d9738 JB |
832 | |
833 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | |
834 | sw_context->cid_valid = false; | |
835 | sw_context->sid_valid = false; | |
836 | sw_context->cur_reloc = 0; | |
837 | sw_context->cur_val_buf = 0; | |
be38ab6e | 838 | sw_context->num_ref_resources = 0; |
fb1d9738 JB |
839 | |
840 | INIT_LIST_HEAD(&sw_context->validate_nodes); | |
841 | ||
922ade0d TH |
842 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
843 | command_size); | |
fb1d9738 JB |
844 | if (unlikely(ret != 0)) |
845 | goto out_err; | |
be38ab6e | 846 | |
65705962 | 847 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); |
fb1d9738 JB |
848 | if (unlikely(ret != 0)) |
849 | goto out_err; | |
850 | ||
851 | ret = vmw_validate_buffers(dev_priv, sw_context); | |
852 | if (unlikely(ret != 0)) | |
853 | goto out_err; | |
854 | ||
855 | vmw_apply_relocations(sw_context); | |
1925d456 | 856 | |
922ade0d | 857 | if (throttle_us) { |
6bcd8d3c | 858 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, |
922ade0d | 859 | throttle_us); |
1925d456 TH |
860 | |
861 | if (unlikely(ret != 0)) | |
be38ab6e TH |
862 | goto out_throttle; |
863 | } | |
864 | ||
922ade0d | 865 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
be38ab6e TH |
866 | if (unlikely(cmd == NULL)) { |
867 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | |
868 | ret = -ENOMEM; | |
922ade0d | 869 | goto out_throttle; |
1925d456 TH |
870 | } |
871 | ||
922ade0d TH |
872 | memcpy(cmd, kernel_commands, command_size); |
873 | vmw_fifo_commit(dev_priv, command_size); | |
fb1d9738 | 874 | |
ae2a1040 TH |
875 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
876 | &fence, | |
877 | (user_fence_rep) ? &handle : NULL); | |
fb1d9738 JB |
878 | /* |
879 | * This error is harmless, because if fence submission fails, | |
ae2a1040 TH |
880 | * vmw_fifo_send_fence will sync. The error will be propagated to |
881 | * user-space in @fence_rep | |
fb1d9738 JB |
882 | */ |
883 | ||
884 | if (ret != 0) | |
885 | DRM_ERROR("Fence submission error. Syncing.\n"); | |
886 | ||
ae2a1040 TH |
887 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, |
888 | (void *) fence); | |
fb1d9738 | 889 | |
ae2a1040 | 890 | vmw_clear_validations(sw_context); |
fb1d9738 | 891 | |
ae2a1040 TH |
892 | if (user_fence_rep) { |
893 | fence_rep.error = ret; | |
894 | fence_rep.handle = handle; | |
895 | fence_rep.seqno = fence->seqno; | |
896 | vmw_update_seqno(dev_priv, &dev_priv->fifo); | |
897 | fence_rep.passed_seqno = dev_priv->last_read_seqno; | |
898 | ||
899 | /* | |
900 | * copy_to_user errors will be detected by user space not | |
901 | * seeing fence_rep::error filled in. Typically | |
902 | * user-space would have pre-set that member to -EFAULT. | |
903 | */ | |
904 | ret = copy_to_user(user_fence_rep, &fence_rep, | |
905 | sizeof(fence_rep)); | |
906 | ||
907 | /* | |
908 | * User-space lost the fence object. We need to sync | |
909 | * and unreference the handle. | |
910 | */ | |
911 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { | |
912 | BUG_ON(fence == NULL); | |
913 | ||
914 | ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | |
915 | handle, TTM_REF_USAGE); | |
916 | DRM_ERROR("Fence copy error. Syncing.\n"); | |
917 | (void) vmw_fence_obj_wait(fence, | |
918 | fence->signal_mask, | |
919 | false, false, | |
920 | VMW_FENCE_WAIT_TIMEOUT); | |
921 | } | |
922 | } | |
fb1d9738 | 923 | |
ae2a1040 TH |
924 | if (likely(fence != NULL)) |
925 | vmw_fence_obj_unreference(&fence); | |
fb1d9738 | 926 | |
922ade0d | 927 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
fb1d9738 | 928 | return 0; |
922ade0d | 929 | |
fb1d9738 JB |
930 | out_err: |
931 | vmw_free_relocations(sw_context); | |
be38ab6e | 932 | out_throttle: |
fb1d9738 JB |
933 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); |
934 | vmw_clear_validations(sw_context); | |
fb1d9738 JB |
935 | out_unlock: |
936 | mutex_unlock(&dev_priv->cmdbuf_mutex); | |
922ade0d TH |
937 | return ret; |
938 | } | |
939 | ||
940 | ||
941 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |
942 | struct drm_file *file_priv) | |
943 | { | |
944 | struct vmw_private *dev_priv = vmw_priv(dev); | |
945 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | |
946 | struct vmw_master *vmaster = vmw_master(file_priv->master); | |
947 | int ret; | |
948 | ||
949 | /* | |
950 | * This will allow us to extend the ioctl argument while | |
951 | * maintaining backwards compatibility: | |
952 | * We take different code paths depending on the value of | |
953 | * arg->version. | |
954 | */ | |
955 | ||
956 | if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { | |
957 | DRM_ERROR("Incorrect execbuf version.\n"); | |
958 | DRM_ERROR("You're running outdated experimental " | |
959 | "vmwgfx user-space drivers."); | |
960 | return -EINVAL; | |
961 | } | |
962 | ||
963 | ret = ttm_read_lock(&vmaster->lock, true); | |
964 | if (unlikely(ret != 0)) | |
965 | return ret; | |
966 | ||
967 | ret = vmw_execbuf_process(file_priv, dev_priv, | |
968 | (void __user *)(unsigned long)arg->commands, | |
969 | NULL, arg->command_size, arg->throttle_us, | |
970 | (void __user *)(unsigned long)arg->fence_rep); | |
971 | ||
972 | if (unlikely(ret != 0)) | |
973 | goto out_unlock; | |
974 | ||
975 | vmw_kms_cursor_post_execbuf(dev_priv); | |
976 | ||
977 | out_unlock: | |
fb1d9738 JB |
978 | ttm_read_unlock(&vmaster->lock); |
979 | return ret; | |
980 | } |