/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

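/**
 * struct ttm_transfer_obj - ghost buffer object used for pipelined moves
 *
 * @base: the temporary buffer object that takes over the old placement
 * @bo: the original buffer object, referenced until @base is destroyed
 */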
struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

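/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context for the move, controlling interruptible waits.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, set (@new_mem)->mm_node to NULL and
 * update the (@bo)->mem placement flags. If unsuccessful, the old
 * placement will remain intact.
 */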
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem, ctx);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

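/**
 * ttm_mem_io_lock - serialize io reservations of a memory type
 *
 * @man: the memory type manager.
 * @interruptible: sleep interruptibly when waiting for the mutex.
 *
 * Takes io_reserve_mutex unless the manager supports the lockless fast
 * path. Returns 0 on success or a negative error code if the wait was
 * interrupted.
 */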
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

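/**
 * ttm_mem_io_reserve - reserve the io space backing a memory region
 *
 * @bdev: the buffer object device.
 * @mem: the memory region to reserve io space for.
 *
 * Calls the driver's io_mem_reserve hook; if the driver returns -EAGAIN,
 * another buffer object's io reservation is evicted and the call retried.
 */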
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

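/*
 * Map the io space backing @mem into kernel address space, taking the io
 * reservation under the manager's io lock first. *virtual is left NULL
 * for regions that are not iomem.
 */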
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                               void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                                void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

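/*
 * Copy a single page between two iomapped regions, 32 bits at a time.
 */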
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                return kmap_atomic(page);
        else
                return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                kunmap_atomic(addr);
        else
                __ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = ttm_kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        ttm_kunmap_atomic_prot(dst, prot);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = ttm_kmap_atomic_prot(s, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        ttm_kunmap_atomic_prot(src, prot);

        return 0;
}

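/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context for the move, controlling interruptible waits.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful, free any old aperture space, set
 * (@new_mem)->mm_node to NULL and update the (@bo)->mem placement flags.
 * If unsuccessful, the old placement will remain intact.
 */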
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm) {
                ret = ttm_tt_populate(ttm, ctx);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else {
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                }
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;
        fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */
        atomic_inc(&ttm_bo_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
        INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
        mutex_init(&fbo->base.wu_mutex);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.list_kref);
        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        fbo->base.base.resv = &fbo->base.base._resv;
        dma_resv_init(fbo->base.base.resv);
        ret = dma_resv_trylock(fbo->base.base.resv);
        WARN_ON(!ret);

        *new_obj = &fbo->base;
        return 0;
}

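/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */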
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

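/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then
 * be used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */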
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

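/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */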
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

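/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. For an eviction, the function waits for the move
 * to finish before freeing the old node. Otherwise the old memory is
 * hung on a ghost buffer object and released when the fence signals,
 * which helps pipeline ordinary buffer moves.
 */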
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        dma_resv_add_excl_fence(bo->base.resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(ghost_obj->base.resv, fence);

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_put(ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

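/**
 * ttm_bo_pipeline_move
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */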
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *old_mem = &bo->mem;

        struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
        struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

        int ret;

        dma_resv_add_excl_fence(bo->base.resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(ghost_obj->base.resv, fence);

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_put(ghost_obj);

        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

                /*
                 * BO doesn't have a TTM we need to bind/unbind. Just remember
                 * this eviction and free up the allocation.
                 */

                spin_lock(&from->move_lock);
                if (!from->move || dma_fence_is_later(fence, from->move)) {
                        dma_fence_put(from->move);
                        from->move = dma_fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

        } else {
                /*
                 * Last resort, wait for the move to be completed.
                 *
                 * Should never happen in practice.
                 */

                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

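/**
 * ttm_bo_pipeline_gutting - purge the contents of a buffer object
 *
 * @bo: The buffer object.
 *
 * Transfers the current backing store to a ghost object that is released
 * once the bo is idle, leaving the bo empty with a system placement.
 */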
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        int ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                return ret;

        ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        memset(&bo->mem, 0, sizeof(bo->mem));
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->ttm = NULL;

        ttm_bo_unreserve(ghost);
        ttm_bo_put(ghost);

        return 0;
}