drm/ttm: update bulk move object of ghost BO
[linux-2.6-block.git] drivers/gpu/drm/ttm/ttm_bo_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/iosys-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}
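
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * bracket CPU access to a resource's I/O aperture with these two helpers,
 * as ttm_bo_kmap()/ttm_bo_vmap() below do; access_aperture() is a
 * hypothetical stand-in for the actual access:
 *
 *	int ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
 *
 *	if (ret)
 *		return ret;
 *	if (bo->resource->bus.is_iomem)
 *		access_aperture(bo->resource);
 *	ttm_mem_io_free(bo->bdev, bo->resource);
 */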

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);
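
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * sets up one kmap iterator per resource and lets ttm_move_memcpy() walk
 * the pages, exactly as ttm_bo_move_memcpy() below does; bdev, dst_mem
 * and bo mirror that function's locals:
 *
 *	struct ttm_kmap_iter_linear_io io;
 *	struct ttm_kmap_iter_tt tt;
 *	struct ttm_kmap_iter *dst, *src;
 *
 *	dst = ttm_kmap_iter_linear_io_init(&io, bdev, dst_mem);
 *	src = ttm_kmap_iter_tt_init(&tt, bo->ttm);
 *	if (!IS_ERR(dst) && !IS_ERR(src))
 *		ttm_move_memcpy(false, dst_mem->num_pages, dst, src);
 */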

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
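
/*
 * Sketch of a driver's ttm_device_funcs.move callback falling back to the
 * memcpy path when no copy engine is available (illustrative; the
 * my_drv_*() names are hypothetical):
 *
 *	static int my_drv_move(struct ttm_buffer_object *bo, bool evict,
 *			       struct ttm_operation_ctx *ctx,
 *			       struct ttm_resource *new_mem,
 *			       struct ttm_place *hop)
 *	{
 *		if (!my_drv_has_copy_engine(bo->bdev))
 *			return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *		... schedule a hardware copy instead ...
 *	}
 */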

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
		ttm_bo_set_bulk_move(&fbo->base, NULL);
	} else {
		fbo->base.bulk_move = NULL;
	}

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		kfree(fbo);
		return ret;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
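
/*
 * Usage sketch (illustrative): derive a page protection matching the bo's
 * caching before building a vmap, as ttm_bo_kmap_ttm() and ttm_bo_vmap()
 * below do:
 *
 *	pgprot_t prot = ttm_io_prot(bo, bo->resource, PAGE_KERNEL);
 *	void *vaddr = vmap(bo->ttm->pages, bo->ttm->num_pages, 0, prot);
 */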

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
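
/*
 * Usage sketch (illustrative): a short-term CPU mapping of the first page
 * of a reserved bo, with the mandatory unmap when done.
 * ttm_kmap_obj_virtual() reports whether the returned pointer is iomem:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, 1, &map);
 *
 *	if (!ret) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		... access the page through virtual, honoring is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */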

int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
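
/*
 * Usage sketch (illustrative): map a whole bo through an iosys_map, which
 * hides whether the backing store is iomem or system memory:
 *
 *	struct iosys_map map;
 *	int ret = ttm_bo_vmap(bo, &map);
 *
 *	if (!ret) {
 *		iosys_map_memset(&map, 0, 0, bo->base.size);
 *		ttm_bo_vunmap(bo, &map);
 *	}
 */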

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
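
/*
 * Sketch of the expected call site (illustrative; my_drv_copy() is a
 * hypothetical hardware copy): the driver schedules the blit, then hands
 * the resulting fence to TTM so the old allocation is released only once
 * the copy has completed:
 *
 *	fence = my_drv_copy(bo, bo->resource, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *	dma_fence_put(fence);
 */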

void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;

	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}
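
/*
 * Usage sketch (illustrative): ttm_bo_validate() takes this path when it
 * is called with an empty placement list; the caller must hold the bo's
 * reservation lock:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_pipeline_gutting(bo);
 *	dma_resv_unlock(bo->base.resv);
 */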