/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

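/*
 * Release the memory-manager node backing the bo's current placement.
 */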
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

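/*
 * Fallback move helper for moves involving system memory: unbind and
 * release the old placement when it isn't TTM_PL_SYSTEM, fix up the
 * caching attributes, then bind the TTM to the new placement. On
 * success the bo's mem is updated and the new_mem node is consumed.
 */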
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

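/*
 * Serialize io space reservations against io space eviction. Memory
 * types that support an unlocked fastpath (io_reserve_fastpath) skip
 * the mutex entirely.
 */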
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

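/*
 * Evict the least recently used bo from the io reserve LRU, unmapping
 * its CPU mappings so that its io space reservation can be reused.
 * Called with the io reserve mutex held. Returns -EAGAIN when there is
 * nothing to evict.
 */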
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

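/*
 * Reserve io space for @mem through the driver's io_mem_reserve()
 * callback, refcounted via bus.io_reserved_count. On -EAGAIN, retry
 * after evicting a bo from the io reserve LRU.
 */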
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

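/*
 * Reserve io space for CPU (vm) access to the bo and put the bo on the
 * io reserve LRU, making it a candidate for ttm_mem_io_evict().
 * ttm_mem_io_free_vm() below undoes this.
 */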
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

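/*
 * Map a whole memory region for CPU access: reuse a driver-provided
 * kernel mapping (bus.addr) when one exists, otherwise ioremap the
 * region, write-combined when the placement allows it. *virtual stays
 * NULL for regions that are not io memory.
 */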
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

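/*
 * Copy one page between two io mappings, 32 bits at a time.
 */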
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

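/*
 * Copy one page from an io mapping into a TTM page, and the reverse in
 * ttm_copy_ttm_io_page() below. The system page is mapped with the
 * protection matching its placement: kmap_atomic_prot() on x86, and on
 * other architectures vmap() when a non-default protection is needed,
 * falling back to plain kmap().
 */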
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

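/*
 * Fallback bounce path used when no accelerated (GPU) copy is
 * available: map both placements into the kernel and copy page by
 * page. The copy runs backwards when source and destination overlap
 * within the same memory type, and the destination is simply cleared
 * when there is no source data to preserve.
 */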
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

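/*
 * Translate TTM caching flags into page protection bits for CPU
 * mappings, per architecture.
 */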
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

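/*
 * Map part of a buffer object into kernel address space, using
 * ttm_bo_ioremap() for io memory and ttm_bo_kmap_ttm() for system
 * pages. The mapping is recorded in @map for ttm_bo_kunmap().
 */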
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

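/*
 * Illustrative caller sketch (not from this file), assuming the bo is
 * reserved and idle, using ttm_kmap_obj_virtual() from ttm_bo_api.h:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	(access the mapping, with memcpy_toio()/memcpy_fromio()
 *	 when is_iomem is true)
 *	ttm_bo_kunmap(&map);
 */
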
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

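/*
 * Finish an accelerated move that has been scheduled behind @fence.
 * On eviction the bo is waited on directly; for ordinary moves the old
 * placement is handed to a "ghost" buffer object so that it is
 * released only once the fence signals, letting moves pipeline
 * without stalling the CPU.
 */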
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);