/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->driver->io_mem_reserve)
		return 0;

	return bdev->driver->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
				struct ttm_resource *mem,
				void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		if (mem->bus.caching == ttm_write_combined)
			addr = ioremap_wc(mem->bus.offset, bus_size);
		else
			addr = ioremap(mem->bus.offset, bus_size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
				 struct ttm_resource *mem,
				 void *virtual)
{
	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	kunmap_atomic(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	kunmap_atomic(src);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *old_mem = &bo->mem;
	struct ttm_resource old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;

	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			goto out1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, i);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;

	ttm_bo_assign_mem(bo, new_mem);

	if (!man->use_tt)
		ttm_bo_tt_destroy(bo);

out1:
	ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_resource_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_resource_free(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

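/*
 * Usage sketch (illustrative, not part of the original file): a driver's
 * move callback typically tries its copy engine first and falls back to
 * ttm_bo_move_memcpy() when acceleration is unavailable. my_hw_copy() is
 * a hypothetical stand-in for such an engine path.
 */
static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem)
{
	int ret;

	ret = my_hw_copy(bo, evict, ctx, new_mem);	/* hypothetical */
	if (ret == -ENODEV)
		/* No copy engine available: synchronous CPU copy. */
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);

	return ret;
}
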
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, negative error code on failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	/* Cached mappings need no adjustment */
	if (caching == ttm_cached)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

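/*
 * Usage sketch (illustrative, not part of the original file): a fault
 * handler or mapping path derives the final page protection from the
 * BO's caching before inserting PFNs; the vmap() users in this file do
 * the same with PAGE_KERNEL as the base protection. my_fault_prot() is
 * a hypothetical helper.
 */
static pgprot_t my_fault_prot(struct ttm_buffer_object *bo,
			      struct vm_area_struct *vma)
{
	/* Adjust the VMA's base protection for the BO's caching mode. */
	return ttm_io_prot(bo, &bo->mem, vma->vm_page_prot);
}
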
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

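/*
 * Usage sketch (illustrative, not part of the original file): map one
 * page of a reserved BO, write to it from the CPU and unmap again.
 * my_fill_first_page() is a hypothetical helper; ttm_kmap_obj_virtual()
 * reports whether the returned address is I/O memory.
 */
static int my_fill_first_page(struct ttm_buffer_object *bo, u8 value)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, value, PAGE_SIZE);
	else
		memset(virtual, value, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}
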
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = &bo->mem;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;
		size_t size = bo->num_pages << PAGE_SHIFT;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset, size);
		else
			vaddr_iomem = ioremap(mem->bus.offset, size);

		if (!vaddr_iomem)
			return -ENOMEM;

		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		dma_buf_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = &bo->mem;

	if (dma_buf_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	dma_buf_map_clear(map);

	ttm_mem_io_free(bo->bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

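/*
 * Usage sketch (illustrative, not part of the original file): the
 * ttm_bo_vmap()/ttm_bo_vunmap() pair backs dma-buf vmap support, and
 * struct dma_buf_map hides whether the mapping is I/O or system memory.
 * my_upload() is a hypothetical helper.
 */
static int my_upload(struct ttm_buffer_object *bo,
		     const void *data, size_t size)
{
	struct dma_buf_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	/* Picks memcpy() or memcpy_toio() depending on the mapping. */
	dma_buf_map_memcpy_to(&map, data, size);

	ttm_bo_vunmap(bo, &map);
	return 0;
}
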
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->mem);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->mem);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

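/*
 * Usage sketch (illustrative, not part of the original file): after
 * scheduling a copy on its engine, a driver hands the resulting fence to
 * ttm_bo_move_accel_cleanup(), which picks the ghost-object, pipelined
 * eviction or wait path. my_hw_copy_schedule() is hypothetical.
 */
static int my_accel_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *new_mem)
{
	struct dma_fence *fence;
	int ret;

	fence = my_hw_copy_schedule(bo, new_mem);	/* hypothetical */
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Allow pipelining unless the caller insists on waiting. */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict,
					!ctx->no_wait_gpu, new_mem);
	dma_fence_put(fence);
	return ret;
}
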
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}
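
/*
 * Usage sketch (illustrative, not part of the original file): when an
 * eviction cannot find (or copy to) a new placement, a driver can gut
 * the BO instead, leaving it an empty system-domain object while a
 * ghost BO keeps the old storage until the GPU is idle. my_evict_fallback()
 * is a hypothetical helper.
 */
static int my_evict_fallback(struct ttm_buffer_object *bo)
{
	/* Contents are lost; backing store is freed once idle. */
	return ttm_bo_pipeline_gutting(bo);
}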