/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/swap.h>
#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to also be usable for moves performed
 * asynchronously under a dma-fence, if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful, free any old aperture space and update
 * the buffer object's placement to @dst_mem. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (WARN_ON(!src_mem))
		return -EINVAL;

	src_man = ttm_manager_type(bdev, src_mem->mem_type);
	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_bo_populate(bo, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

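/*
 * Example (illustrative sketch only, not part of this file): a driver's
 * ttm_device_funcs::move() callback typically tries a hardware copy first
 * and falls back to ttm_bo_move_memcpy() when no copy engine is usable.
 * The callback signature below follows recent kernels, and the helpers
 * mydrv_has_copy_engine()/mydrv_copy_with_engine() are hypothetical.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_resource *new_mem,
 *				 struct ttm_place *hop)
 *	{
 *		if (!mydrv_has_copy_engine(bo->bdev))
 *			return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *
 *		return mydrv_copy_with_engine(bo, evict, ctx, new_mem);
 *	}
 */
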
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		dma_resv_unlock(&fbo->base.base._resv);
		kfree(fbo);
		return ret;
	}

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
		ttm_bo_set_bulk_move(&fbo->base, NULL);
	} else {
		fbo->base.bulk_move = NULL;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res, or by
 * @bo's TTM for placements backed by system pages.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	if (man->use_tt) {
		caching = bo->ttm->caching;
		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
			tmp = pgprot_decrypted(tmp);
	} else {
		caching = res->bus.caching;
	}

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_bo_populate(bo, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached &&
	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > PFN_UP(bo->resource->size))
		return -EINVAL;
	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

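/*
 * Example (illustrative sketch only, not part of this file): a typical
 * CPU access pattern through ttm_bo_kmap()/ttm_bo_kunmap() on an already
 * reserved buffer object, using the ttm_kmap_obj_virtual() accessor to
 * learn whether the returned pointer is iomem or system memory.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
 *	else
 *		memset(virtual, 0, PAGE_SIZE);
 *
 *	ttm_bo_kunmap(&map);
 */
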
/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_bo_populate(bo, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	dma_resv_assert_held(bo->base.resv);

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

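/*
 * Example (illustrative sketch only, not part of this file): mapping a
 * whole, reserved buffer object with ttm_bo_vmap() and writing through the
 * iosys_map helpers, which hide the iomem vs. system-memory distinction.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_vmap(bo, &map);
 *	if (!ret) {
 *		iosys_map_memset(&map, 0, 0, bo->base.size);
 *		ttm_bo_vunmap(bo, &map);
 *	}
 *	dma_resv_unlock(bo->base.resv);
 */
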
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	long ret;

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    false, 15 * HZ);
	if (ret == 0)
		return -EBUSY;
	if (ret < 0)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

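/*
 * Example (illustrative sketch only, not part of this file): a driver move
 * path that schedules a copy on its own engine and then hands the resulting
 * fence to TTM. mydrv_copy_job() is a hypothetical helper returning the
 * copy fence; the callback signature follows recent kernels.
 *
 *	static int mydrv_move(struct ttm_buffer_object *bo, bool evict,
 *			      struct ttm_operation_ctx *ctx,
 *			      struct ttm_resource *new_mem,
 *			      struct ttm_place *hop)
 *	{
 *		struct dma_fence *fence;
 *		int ret;
 *
 *		fence = mydrv_copy_job(bo, new_mem);
 *		if (IS_ERR(fence))
 *			return PTR_ERR(fence);
 *
 *		ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *		dma_fence_put(fence);
 *		return ret;
 *	}
 */
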
/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for the buffer to become idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	struct ttm_tt *ttm;
	int ret;

	/* If already idle, no need for ghost object dance. */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				return ret;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		return ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret) {
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);
	}

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);
	return ret;
}

static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
				 struct ttm_buffer_object *bo,
				 bool *needs_unlock)
{
	*needs_unlock = false;

	if (dma_resv_trylock(bo->base.resv)) {
		*needs_unlock = true;
		return true;
	}

	if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
		dma_resv_assert_held(bo->base.resv);
		return true;
	}

	return false;
}

static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
				   struct ttm_buffer_object *bo,
				   bool *needs_unlock)
{
	struct dma_resv *resv = bo->base.resv;
	int ret;

	if (walk->ctx->interruptible)
		ret = dma_resv_lock_interruptible(resv, walk->ticket);
	else
		ret = dma_resv_lock(resv, walk->ticket);

	if (!ret) {
		*needs_unlock = true;
		/*
		 * Only a single ticketlock per loop. Ticketlocks are prone
		 * to return -EDEADLK causing the eviction to fail, so
		 * after waiting for the ticketlock, revert back to
		 * trylocking for this walk.
		 */
		walk->ticket = NULL;
	} else if (ret == -EDEADLK) {
		/* Caller needs to exit the ww transaction. */
		ret = -ENOSPC;
	}

	return ret;
}

static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
{
	if (locked)
		dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
 * valid items.
 * @walk: describe the walks and actions taken
 * @bdev: The TTM device.
 * @man: The struct ttm_resource manager whose LRU lists we're walking.
 * @target: The end condition for the walk.
 *
 * The LRU lists of @man are walked, and for each struct ttm_resource
 * encountered, the corresponding ttm_buffer_object is locked and a reference
 * is taken on it, and the LRU lock is dropped. The LRU lock may also be
 * dropped before locking and, in that case, it's verified that the item
 * actually remains on the LRU list after the lock, and that the buffer
 * object didn't switch resource in between.
 *
 * With a locked object, the actions indicated by @walk->process_bo are
 * performed, and after that, the bo is unlocked, the refcount dropped and the
 * next struct ttm_resource is processed. Here, the walker relies on
 * TTM's restartable LRU list implementation.
 *
 * Typically @walk->process_bo() would return the number of pages evicted,
 * swapped or shrunken, so that when the total exceeds @target, or when the
 * LRU list has been walked in full, iteration is terminated. It's also
 * terminated on error. Note that the definition of @target is done by the
 * caller; it could have a different meaning than the number of pages.
 *
 * Note that the way dma_resv individualization is done, locking needs to be
 * done either with the LRU lock held (trylocking only) or with a reference
 * on the object.
 *
 * Return: The progress made towards target or negative error code on error.
 */
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	s64 progress = 0;
	s64 lret;

	spin_lock(&bdev->lru_lock);
	ttm_resource_cursor_init(&cursor, man);
	ttm_resource_manager_for_each_res(&cursor, res) {
		struct ttm_buffer_object *bo = res->bo;
		bool bo_needs_unlock = false;
		bool bo_locked = false;
		int mem_type;

		/*
		 * Attempt a trylock before taking a reference on the bo,
		 * since if we do it the other way around, and the trylock fails,
		 * we need to drop the lru lock to put the bo.
		 */
		if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
			bo_locked = true;
		else if (!walk->ticket || walk->ctx->no_wait_gpu ||
			 walk->trylock_only)
			continue;

		if (!ttm_bo_get_unless_zero(bo)) {
			ttm_lru_walk_unlock(bo, bo_needs_unlock);
			continue;
		}

		mem_type = res->mem_type;
		spin_unlock(&bdev->lru_lock);

		lret = 0;
		if (!bo_locked)
			lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);

		/*
		 * Note that in between the release of the lru lock and the
		 * ticketlock, the bo may have switched resource,
		 * and also memory type, since the resource may have been
		 * freed and allocated again with a different memory type.
		 * In that case, just skip it.
		 */
		if (!lret && bo->resource && bo->resource->mem_type == mem_type)
			lret = walk->ops->process_bo(walk, bo);

		ttm_lru_walk_unlock(bo, bo_needs_unlock);
		ttm_bo_put(bo);
		if (lret == -EBUSY || lret == -EALREADY)
			lret = 0;
		progress = (lret < 0) ? lret : progress + lret;

		spin_lock(&bdev->lru_lock);
		if (progress < 0 || progress >= target)
			break;
	}
	ttm_resource_cursor_fini(&cursor);
	spin_unlock(&bdev->lru_lock);

	return progress;
}
EXPORT_SYMBOL(ttm_lru_walk_for_evict);

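/*
 * Example (illustrative sketch only, not part of this file): a minimal walk
 * that tries to reclaim @target pages from a manager. The struct fields used
 * (ops, ctx, ticket, trylock_only) mirror what ttm_lru_walk_for_evict()
 * dereferences above; see ttm_bo.h in your tree for the authoritative
 * definitions. mydrv_reclaim_bo() is a hypothetical per-bo helper returning
 * the number of pages it freed.
 *
 *	static s64 mydrv_reclaim_cb(struct ttm_lru_walk *walk,
 *				    struct ttm_buffer_object *bo)
 *	{
 *		return mydrv_reclaim_bo(bo, walk->ctx);
 *	}
 *
 *	static const struct ttm_lru_walk_ops mydrv_reclaim_ops = {
 *		.process_bo = mydrv_reclaim_cb,
 *	};
 *
 *	s64 mydrv_reclaim(struct ttm_device *bdev,
 *			  struct ttm_resource_manager *man,
 *			  struct ttm_operation_ctx *ctx, s64 target)
 *	{
 *		struct ttm_lru_walk walk = {
 *			.ops = &mydrv_reclaim_ops,
 *			.ctx = ctx,
 *			.trylock_only = true,
 *		};
 *
 *		return ttm_lru_walk_for_evict(&walk, bdev, man, target);
 *	}
 */
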
static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
{
	struct ttm_buffer_object *bo = curs->bo;

	if (bo) {
		if (curs->needs_unlock)
			dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		curs->bo = NULL;
	}
}

/**
 * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
 * and clean up any iteration it was used for.
 * @curs: The cursor.
 */
void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;

	ttm_bo_lru_cursor_cleanup_bo(curs);
	spin_lock(lru_lock);
	ttm_resource_cursor_fini(&curs->res_curs);
	spin_unlock(lru_lock);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);

/**
 * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
 * @curs: The ttm_bo_lru_cursor to initialize.
 * @man: The ttm resource_manager whose LRU lists to iterate over.
 * @ctx: The ttm_operation_ctx to govern the locking.
 *
 * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
 * or prelocked buffer objects are available as detailed by
 * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
 * supported.
 *
 * Return: Pointer to @curs. The function does not fail.
 */
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
		       struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	memset(curs, 0, sizeof(*curs));
	ttm_resource_cursor_init(&curs->res_curs, man);
	curs->ctx = ctx;

	return curs;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_init);

static struct ttm_buffer_object *
ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
{
	struct ttm_buffer_object *bo = res->bo;

	if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
		return NULL;

	if (!ttm_bo_get_unless_zero(bo)) {
		if (curs->needs_unlock)
			dma_resv_unlock(bo->base.resv);
		return NULL;
	}

	curs->bo = bo;
	return bo;
}

/**
 * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
 * to find and lock a buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
 * ttm_bo_lru_cursor_first().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
	struct ttm_resource *res = NULL;
	struct ttm_buffer_object *bo;

	ttm_bo_lru_cursor_cleanup_bo(curs);

	spin_lock(lru_lock);
	for (;;) {
		res = ttm_resource_manager_next(&curs->res_curs);
		if (!res)
			break;

		bo = ttm_bo_from_res_reserved(res, curs);
		if (bo)
			break;
	}

	spin_unlock(lru_lock);
	return res ? bo : NULL;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_next);

/**
 * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
 * to find and lock a buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;

	spin_lock(lru_lock);
	res = ttm_resource_manager_first(&curs->res_curs);
	if (!res) {
		spin_unlock(lru_lock);
		return NULL;
	}

	bo = ttm_bo_from_res_reserved(res, curs);
	spin_unlock(lru_lock);

	return bo ? bo : ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);

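/*
 * Example (illustrative sketch only, not part of this file): iterating a
 * manager's LRU lists with the cursor helpers. Each returned bo is locked
 * and referenced; the lock and reference are released by the next call to
 * ttm_bo_lru_cursor_next() or by ttm_bo_lru_cursor_fini().
 *
 *	struct ttm_bo_lru_cursor curs;
 *	struct ttm_buffer_object *bo;
 *
 *	for (bo = ttm_bo_lru_cursor_first(ttm_bo_lru_cursor_init(&curs, man, ctx));
 *	     bo; bo = ttm_bo_lru_cursor_next(&curs)) {
 *		// act on the locked, referenced bo here
 *	}
 *	ttm_bo_lru_cursor_fini(&curs);
 */
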
/**
 * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
 * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
 * @bo: The buffer object.
 * @flags: Flags governing the shrinking behaviour.
 *
 * The function uses the ttm_tt_backup() functionality to back up or
 * purge a struct ttm_tt. If the bo is not in system placement, it is
 * first moved there.
 *
 * Return: The number of pages shrunken or purged, or
 * negative error code on failure.
 */
long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags)
{
	static const struct ttm_place sys_placement_flags = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0,
	};
	static struct ttm_placement sys_placement = {
		.num_placement = 1,
		.placement = &sys_placement_flags,
	};
	struct ttm_tt *tt = bo->ttm;
	long lret;

	dma_resv_assert_held(bo->base.resv);

	if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
		int ret = ttm_bo_validate(bo, &sys_placement, ctx);

		/* Consider -ENOMEM and -ENOSPC non-fatal. */
		if (ret) {
			if (ret == -ENOMEM || ret == -ENOSPC)
				ret = -EBUSY;
			return ret;
		}
	}

	ttm_bo_unmap_virtual(bo);
	lret = ttm_bo_wait_ctx(bo, ctx);
	if (lret < 0)
		return lret;

	if (bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
			     {.purge = flags.purge,
			      .writeback = flags.writeback});

	if (lret <= 0 && bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	if (lret < 0 && lret != -EINTR)
		return -EBUSY;

	return lret;
}
EXPORT_SYMBOL(ttm_bo_shrink);

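/*
 * Example (illustrative sketch only, not part of this file): how a driver
 * shrinker might combine the shrink helpers in this file for one buffer
 * object that it has already locked. The flag values chosen here are
 * illustrative.
 *
 *	static long mydrv_shrink_one(struct ttm_buffer_object *bo,
 *				     struct ttm_operation_ctx *ctx)
 *	{
 *		struct ttm_bo_shrink_flags flags = {
 *			.purge = false,
 *			.writeback = true,
 *			.allow_move = true,
 *		};
 *
 *		if (!ttm_bo_shrink_suitable(bo, ctx))
 *			return 0;
 *
 *		if (ttm_bo_shrink_avoid_wait())
 *			ctx->no_wait_gpu = true;
 *
 *		return ttm_bo_shrink(ctx, bo, flags);
 *	}
 */
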
/**
 * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
 * @ctx: The struct ttm_operation_ctx governing the shrinking.
 * @bo: The candidate for shrinking.
 *
 * Check whether the object, given the information available to TTM,
 * is suitable for shrinking. This function can and should be used
 * before attempting to shrink an object.
 *
 * Return: true if suitable. false if not.
 */
bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
		(!ctx->no_wait_gpu ||
		 dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
}
EXPORT_SYMBOL(ttm_bo_shrink_suitable);

/**
 * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
 * during shrinking
 *
 * In some situations, like direct reclaim, waiting (in particular GPU
 * waiting) should be avoided since it may stall a system that could
 * otherwise make progress by shrinking something else that is less
 * time-consuming.
 *
 * Return: true if GPU waiting should be avoided, false if not.
 */
bool ttm_bo_shrink_avoid_wait(void)
{
	return !current_is_kswapd();
}
EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);