/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}
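
/*
 * Example (editor's sketch, not part of the upstream file): any CPU access
 * to the linear IO space of a resource must be bracketed by
 * ttm_mem_io_reserve() and ttm_mem_io_free(); ttm_bo_kmap() and
 * ttm_bo_vmap() below follow exactly this pattern. The helper name is
 * hypothetical.
 */
static int example_peek_bus_placement(struct ttm_buffer_object *bo)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	/* mem->bus.{addr,offset,is_iomem,caching} are valid here. */

	ttm_mem_io_free(bo->bdev, mem);
	return 0;
}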

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be usable for moves that complete
 * asynchronously under a dma-fence, if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct dma_buf_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
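
/*
 * Example (editor's sketch, not part of the upstream file): a driver's
 * ttm_device_funcs::move callback falling back to the CPU copy above when
 * no hardware copy engine is usable. example_hw_copy_possible() and
 * example_schedule_hw_copy() are hypothetical driver functions.
 */
static int example_driver_move(struct ttm_buffer_object *bo, bool evict,
			       struct ttm_operation_ctx *ctx,
			       struct ttm_resource *new_mem,
			       struct ttm_place *hop)
{
	if (!example_hw_copy_possible(bo, new_mem))
		return ttm_bo_move_memcpy(bo, ctx, new_mem);

	/*
	 * Otherwise schedule a DMA copy and finish with
	 * ttm_bo_move_accel_cleanup(), shown further below.
	 */
	return example_schedule_hw_copy(bo, evict, new_mem);
}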

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, negative error code on failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
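
/*
 * Example (editor's sketch, not part of the upstream file): writing a few
 * bytes into a reserved bo through the kmap interface; the helper name and
 * parameters are hypothetical. ttm_kmap_obj_virtual() tells the caller
 * whether the returned pointer is iomem and must not be dereferenced
 * directly.
 */
static int example_bo_write_page(struct ttm_buffer_object *bo,
				 unsigned long page,
				 const void *src, size_t len)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_bo_kmap(bo, page, 1, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_toio((void __iomem *)virtual, src, len);
	else
		memcpy(virtual, src, len);

	ttm_bo_kunmap(&map);
	return 0;
}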

int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		dma_buf_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (dma_buf_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	dma_buf_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
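
/*
 * Example (editor's sketch, not part of the upstream file): clearing a whole
 * bo through the dma_buf_map interface. The map carries its own iomem flag,
 * so the caller picks the right accessor; the helper name is hypothetical.
 */
static int example_bo_clear(struct ttm_buffer_object *bo)
{
	struct dma_buf_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	if (map.is_iomem)
		memset_io(map.vaddr_iomem, 0, bo->base.size);
	else
		memset(map.vaddr, 0, bo->base.size);

	ttm_bo_vunmap(bo, &map);
	return 0;
}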

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;
	bo->resource = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;

	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	bo->resource = NULL;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}
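
/*
 * Example (editor's sketch, not part of the upstream file): gutting is what
 * ttm_bo_validate() resorts to when called with an empty placement list, so
 * a driver-side "discard backing store" helper can simply validate with no
 * placements; the helper name is hypothetical.
 */
static int example_bo_discard(struct ttm_buffer_object *bo)
{
	struct ttm_placement placement = {};
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	dma_resv_assert_held(bo->base.resv);

	return ttm_bo_validate(bo, &placement, &ctx);
}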