/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
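
/**
 * vmw_resource_reference - Take a reference on a resource.
 *
 * @res: Pointer to the resource.
 *
 * Returns @res with its reference count incremented.
 */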
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}
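
/**
 * vmw_resource_unreference - Drop a reference on a resource.
 *
 * @p_res: Pointer to the resource pointer, which is set to NULL.
 *
 * The device resource lock is held around the final kref_put() so that
 * vmw_resource_release() is called with that lock held.
 */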
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
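
/**
 * vmw_resource_lookup - Look up a resource by its device id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @idr: The idr in which to look the id up.
 * @id: The resource id.
 *
 * Returns a refcounted pointer to the resource if it is found and
 * available, or NULL otherwise.
 */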
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * Buffer management.
 */
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}
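
/**
 * vmw_dmabuf_init - Initialize a struct vmw_dma_buffer.
 *
 * @dev_priv: Pointer to a device private struct.
 * @vmw_bo: The struct vmw_dma_buffer to initialize.
 * @size: Size of the underlying TTM buffer object.
 * @placement: Initial TTM placement of the buffer.
 * @interruptible: Whether waits during initialization should be interruptible.
 * @bo_free: Destructor for the embedded TTM buffer object. Must not be NULL.
 */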
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_base_object_kfree(vmw_user_bo, base);
}
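
/**
 * vmw_user_dmabuf_release - TTM base object release callback.
 *
 * @p_base: Pointer to the base object pointer, which is cleared.
 *
 * Drops the buffer object reference held by the user base object.
 */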
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_base_object_init(tfile,
				   &user_bo->base,
				   shareable,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->base.tfile == tfile ||
		vmw_user_bo->base.shareable) ? 0 : -EPERM;
}
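
/**
 * vmw_dmabuf_alloc_ioctl - Ioctl to allocate a dma buffer.
 *
 * Allocates a buffer object together with a user-space handle and fills
 * in the handle, map offset and current GMR id in the ioctl reply.
 */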
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = dma_buf->base.addr_space_offset;
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
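
/**
 * vmw_dmabuf_unref_ioctl - Ioctl to drop a user-space handle to a dma buffer.
 */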
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
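
/**
 * vmw_user_dmabuf_lookup - Look up a dma buffer from a user-space handle.
 *
 * @tfile: Identifying the caller.
 * @handle: The user-space handle.
 * @out: On success, points to the refcounted struct vmw_dma_buffer.
 *
 * Returns -ESRCH if the handle doesn't exist, and -EINVAL if it doesn't
 * identify a buffer object.
 */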
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
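
/**
 * vmw_user_dmabuf_reference - Add a TTM_REF_USAGE reference to a user buffer.
 *
 * @tfile: Identifying the caller.
 * @dma_buf: The buffer object. Must be a user dma buffer.
 */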
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */
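
/**
 * vmw_stream_destroy - hw_destroy callback for stream resources.
 *
 * Releases the overlay stream id claimed in vmw_stream_init().
 */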
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
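
/**
 * vmw_stream_unref_ioctl - Ioctl to drop a user-space handle to a stream.
 *
 * Verifies that the stream was created by the calling file before the
 * base-object reference is dropped.
 */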
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
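
/**
 * vmw_stream_claim_ioctl - Ioctl to claim an overlay stream.
 *
 * Accounts the allocation against the global TTM memory limit, sets up a
 * user stream resource and returns its device id in the stream argument.
 */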
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */
	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
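
/**
 * vmw_user_stream_lookup - Look up a stream resource from a user-space id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @tfile: Identifying the caller.
 * @inout_id: The user-space stream id, replaced by the overlay stream id
 * on successful return.
 * @out: On successful return points to the refcounted stream resource.
 */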
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (vmw_user_bo == NULL)
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (ret != 0) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (ret != 0)
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	args->handle = vmw_user_bo->base.hash.key;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
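
/**
 * vmw_dumb_map_offset - Return the map offset of a dumb buffer.
 *
 * Looks up the dumb buffer by handle and returns the address space offset
 * to use when mmapping it.
 */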
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = out_buf->base.addr_space_offset;
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
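
/**
 * vmw_dumb_destroy - Drop the user-space handle of a dumb buffer.
 */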
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  struct ww_acquire_ctx *ticket,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 */
int vmw_resource_do_evict(struct vmw_resource *res)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	struct ww_acquire_ctx ticket;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device id entries "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}